From: wessels <>
Date: Mon, 4 Oct 1999 11:04:00 +0000 (+0000)
Subject: 2.3 branch merge
X-Git-Tag: SQUID_3_0_PRE1~2116
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9bc73deb181f454f4e5597c67c5b6b40fd4d0f58;p=thirdparty%2Fsquid.git
2.3 branch merge
---
diff --git a/ChangeLog b/ChangeLog
index 2a862b7075..fd7f410456 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,43 @@
+Changes to Squid-2.3.DEVEL2 ():
+
+ - Added --enable-truncate configure option.
+ - Updated Czech error messages ()
+ - Updated French error messages ()
+ - Updated Spanish error messages ()
+ - Added xrename() function for better debugging.
+ - Disallow empty ("") password in aclDecodeProxyAuth()
+ (BoB Miorelli).
+ - Fixed ACL SPLAY subdomain detection (again).
+ - Increased default 'request_body_max_size' from 100KB
+ to 1MB in cf.data.pre.
+ - Added 'content_length' member to request_t structure
+ so we don't have to use httpHdrGetInt() so often.
+ - Fixed repeatedly calling memDataInit() for every reconfigure.
+ - Cleaned up the case when fwdDispatch() cannot forward a
+ request. Error messages used to report "[no URL]".
+ - Added a check to return specific error messages for a
+ "store_digest" request when the digest entry doesn't exist
+ and we reach internalStart().
+ - Changed the interface of storeSwapInStart() to avoid a bug
+ where we closed "sc->swapin_sio" but couldn't set the
+ pointer to NULL.
+ - Changed storeDirClean() so that the rate it gets called
+ depends on the number of objects deleted.
+ - Some WCCP fixes.
+ - Added 'hostname_aliases' option to detect internal requests
+ (cache digests) when a cache has more than one hostname
+ in use.
+ - Async I/O NUMTHREADS now configurable with --enable-async-io=N
+ (Henrik Nordstrom).
+ - Added queue length to async I/O cachemgr stats (Henrik Nordstrom).
+ - Added OPTIONS request method.
+ - WCCP establishes and registers with the router faster.
+ - Added 'maxconn' acl type to limit the number of established
+ connections from a single client IP address. Submitted
+ by Vadim Kolontsov.
+ - Close FTP data socket as soon as transfer completes
+ (Alexander V. Lukyanov).
+
Changes to Squid-2.3.DEVEL1 ():
- Added WCCP support. This adds the 'wccp_router' squid.conf
@@ -25,7 +65,6 @@ Changes to Squid-2.3.DEVEL1 ():
- Added 'peer_connect_timeout' squid.conf option.
- Added 'redirector_bypass' squid.conf option.
- Added RFC 2518 (WEBDAV) request methods.
-
Changes to Squid-2.2 (April 19, 1999):
@@ -233,6 +272,25 @@ Changes to Squid-2.2 (April 19, 1999):
if a cache_dir subdirectory didn't exist.
- Fixed a buffer overrun bug in gb_to_str().
+ 2.2.STABLE4:
+
+ - Fixed a dread_ctrl leak caused in store_client.c
+ - Fixed a memory leak in eventRun().
+ - Fixed a memory leak of ErrorState structures due to
+ a bug in forward.c.
+ - Fixed detection of subdomain collisions for SPLAY trees.
+ - Fixed logging of hierarchy codes for SSL requests (Henrik
+ Nordstrom).
+ - Added some descriptions to mib.txt.
+ - Fixed a bug with non-hierarchical requests (e.g. POST)
+ and cache digests. We used to look up non-hierarchical
+ requests in peer digests. A false hit may cause Squid
+ to forward a request to a sibling. In combination with
+ 'Cache-control: only-if-cached, this generates 504 Gateway
+ Timeout responses and the request may not be re-forwardable.
+ - Fixed a filedescriptor leak for some aborted requests.
+
+
Changes to Squid-2.1 (November 16, 1998):
- Changed delayPoolsUpdate() to be called as an event.
diff --git a/cfgaux/config.guess b/cfgaux/config.guess
index d5da525231..1371b5fecb 100755
--- a/cfgaux/config.guess
+++ b/cfgaux/config.guess
@@ -555,6 +555,9 @@ EOF
news*:NEWS-OS:[56].*:*)
echo mips-sony-newsos${UNAME_RELEASE}
exit 0 ;;
+ *:Rhapsody:*:*)
+ echo `arch`-apple-rhapsody${UNAME_RELEASE}
+ exit 0;;
i?86:OS/2:*:*)
echo ${UNAME_MACHINE}-ibm-os2
exit 0 ;;
diff --git a/cfgaux/config.sub b/cfgaux/config.sub
index f751a9eed8..ac63ffeede 100755
--- a/cfgaux/config.sub
+++ b/cfgaux/config.sub
@@ -753,6 +753,9 @@ case $os in
-xenix)
os=-xenix
;;
+ -rhapsody*)
+ os=-rhapsody
+ ;;
-none)
;;
-os2)
diff --git a/configure b/configure
index 026173dc57..cb357e7826 100755
--- a/configure
+++ b/configure
@@ -27,7 +27,11 @@ ac_help="$ac_help
ac_help="$ac_help
--enable-carp Enable CARP support"
ac_help="$ac_help
- --enable-async-io Do ASYNC disk I/O using threads"
+ --enable-async-io[=N_THREADS]
+ Do ASYNC disk I/O using threads.
+ N_THREADS is the number of worker threads
+ defaults to 16. See also src/squid.h for
+ some additional platform tuning"
ac_help="$ac_help
--enable-icmp Enable ICMP pinging"
ac_help="$ac_help
@@ -95,6 +99,13 @@ ac_help="$ac_help
performance improvement, but may cause problems
when used with async I/O. Truncate uses more
filesystem inodes than unlink.."
+ac_help="$ac_help
+ --enable-underscores Squid by default rejects any host names with _
+ in their name to conform with internet standars.
+ If you disagree with this you may allow _ in
+ hostnames by using this switch, provided that
+ the resolver library on the host where Squid runs
+ does not reject _ in hostnames..."
# Initialize some variables set by options.
# The variables have the same names as the options, with
@@ -603,7 +614,7 @@ fi
-# From configure.in Revision: 1.176.2.3
+# From configure.in Revision: 1.177
ac_aux_dir=
for ac_dir in cfgaux $srcdir/cfgaux; do
if test -f $ac_dir/install-sh; then
@@ -631,7 +642,7 @@ else { echo "configure: error: can not run $ac_config_sub" 1>&2; exit 1; }
fi
echo $ac_n "checking host system type""... $ac_c" 1>&6
-echo "configure:635: checking host system type" >&5
+echo "configure:646: checking host system type" >&5
host_alias=$host
case "$host_alias" in
@@ -699,7 +710,7 @@ PRESET_CFLAGS="$CFLAGS"
# Extract the first word of "gcc", so it can be a program name with args.
set dummy gcc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:703: checking for $ac_word" >&5
+echo "configure:714: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -728,7 +739,7 @@ if test -z "$CC"; then
# Extract the first word of "cc", so it can be a program name with args.
set dummy cc; ac_word=$2
echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:732: checking for $ac_word" >&5
+echo "configure:743: checking for $ac_word" >&5
if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then
echo $ac_n "(cached) $ac_c" 1>&6
else
@@ -776,7 +787,7 @@ fi
fi
echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6
-echo "configure:780: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
+echo "configure:791: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5
ac_ext=c
# CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options.
@@ -786,11 +797,11 @@ ac_link='${CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS
cross_compiling=$ac_cv_prog_cc_cross
cat > conftest.$ac_ext <
+A
+struct _storeSwapLogData {
+ char op;
+ int swap_file_number;
+ time_t timestamp;
+ time_t lastref;
+ time_t expires;
+ time_t lastmod;
+ size_t swap_file_sz;
+ u_short refcount;
+ u_short flags;
+ unsigned char key[MD5_DIGEST_CHARS];
+};
+
+
+
-Pøistup k dokumentu byl stornován v dùsledku nedostatèných pøÃstupových
-práv. Pokud jste pøesvìdèeni, ¾e se jedná o chybu, kontaktujte správce
-vzdáleného serveru.
+PøÃstup k dokumentu byl stornován v dùsledku nedostateèných pøÃstupových
+práv. Pokud jste pøesvìdèeni, ¾e se jedná o chybu, kontaktujte
+správce vzdáleného serveru.
+ Olouváme se, ale pøÃstup k:
+ Omlouváme se, ale pøÃstup k:
CHYBA
@@ -19,7 +19,7 @@ Cache Access Denied.
%U
je povolen a¾ po autorizaci.
Pøi pokusu o pøÃstup k: @@ -27,7 +27,7 @@ je dostupn
Pro úspì¹nou autorizaci potøebujete prohlþeèe NETSCAPE 2.0 a vy¹¹Ã, Microsoft Internet Explorer 3.0 nebo prohlþeè podporujÃcà protokol -HTTP/1.1. V pøÃpadì problému se prosÃm obra»te nacache +HTTP/1.1. V pøÃpadì problému se prosÃm obra»te na cache administratora nebo si zmìòte -heslo. -
heslo. + \ No newline at end of file diff --git a/errors/Czech/ERR_CANNOT_FORWARD b/errors/Czech/ERR_CANNOT_FORWARD index 9fb2437a09..f95d5cc34f 100644 --- a/errors/Czech/ERR_CANNOT_FORWARD +++ b/errors/Czech/ERR_CANNOT_FORWARD @@ -13,7 +13,7 @@ se objevila nse objevila následujÃcà chyba:
-Jméno serveru nenà mo¾no pøevést na IP adresu +Jméno serveru nenà mo¾no pøevést na IP adresu. %H@@ -23,9 +23,11 @@ DNS server odpov
To znamená ¾e: -
++
-Cache server nepøesmìruje Vá¹ po¾adavek, proto¾e by to nedovolujà vztahy v -hierarchické struktuøe cache serverù. %i je pravd¾podobnì chybnì +Cache server nepøesmìruje Vá¹ po¾adavek, proto¾e to nedovolujà vztahy v +hierarchické struktuøe cache serverù. %i je pravdìpodobnì chybnì nakonfigurovaný cache server.
diff --git a/errors/Czech/ERR_FTP_DISABLED b/errors/Czech/ERR_FTP_DISABLED index 76bd902fd3..ffd4a34ccd 100644 --- a/errors/Czech/ERR_FTP_DISABLED +++ b/errors/Czech/ERR_FTP_DISABLED @@ -1,6 +1,5 @@ -heslo.-Squid odeslal následujÃcà FTP pøÃkaz: +Squid odeslal následujÃcà FTP pøÃkaz:
a obdr¾el tuto odpovìï:%f
diff --git a/errors/Czech/ERR_FTP_FORBIDDEN b/errors/Czech/ERR_FTP_FORBIDDEN index 5f1b5e9843..929590757e 100644 --- a/errors/Czech/ERR_FTP_FORBIDDEN +++ b/errors/Czech/ERR_FTP_FORBIDDEN @@ -1,17 +1,18 @@ -%F
-An FTP authentication failure occurred -while trying to retrieve the URL: +Pøi pokusu o pøÃstup k URL: %U +se objevila chyba autorizace FTP.
-Squid sent the following FTP command: +Squid odeslal následujÃcà FTP pøÃkaz:
-and then received this reply +a obdr¾el tuto odpovìï:%f
%F
diff --git a/errors/Czech/ERR_FTP_NOT_FOUND b/errors/Czech/ERR_FTP_NOT_FOUND index a9b3f04406..3a76152a5d 100644 --- a/errors/Czech/ERR_FTP_NOT_FOUND +++ b/errors/Czech/ERR_FTP_NOT_FOUND @@ -1,21 +1,22 @@ -%g
-The following URL could not be retrieved: +Po¾adovaný dokument na adrese: %U +nebyl nalezen.
-Squid sent the following FTP command: +Squid odeslal následujÃcà FTP pøÃkaz:
-and then received this reply +a obdr¾el tuto odpovìï:%f
%F
%g
-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +To mù¾e být zapøÃèinìno pou¾itÃm absolutnà cesty v FTP URL (co¾ odporuje RFC +1738). V tomto pøÃpadì mù¾e být dokument nalezen na %B.
diff --git a/errors/Czech/ERR_FTP_PUT_ERROR b/errors/Czech/ERR_FTP_PUT_ERROR index ace7f7fdce..00d7bb4386 100644 --- a/errors/Czech/ERR_FTP_PUT_ERROR +++ b/errors/Czech/ERR_FTP_PUT_ERROR @@ -1,8 +1,16 @@ -Pøi -pokusu o PUT následujÃcÃho URL: %U
odeslal Squid -následujÃcà FTP pøÃkaz:
+ + +CHYBA: neuspesny FTP upload + +CHYBA
+FTP PUT/upload neúspì¹ný
+
++Pøi pokusu o PUT následujÃcÃho URL: +%U +
+odeslal Squid následujÃcà FTP pøÃkaz: +
%fa obdr¾el tuto odpovìï @@ -11,8 +19,10 @@ a obdr
Zkuste: -
++
-The FTP server was too busy while trying to retrieve the URL: +FTP server je pøetþen a nemù¾e poslat nasledujÃcà dokument: %U
-Squid sent the following FTP command: +Squid odeslal následujÃcà FTP pøÃkaz:
-and then received this reply +a obdr¾el tuto odpovìï:%f
%F
diff --git a/errors/Czech/ERR_INVALID_REQ b/errors/Czech/ERR_INVALID_REQ index 70e1eb2f9b..8e51ebf355 100644 --- a/errors/Czech/ERR_INVALID_REQ +++ b/errors/Czech/ERR_INVALID_REQ @@ -6,7 +6,6 @@%g
-While trying to process the request: Pøi provádìnà po¾adavku:
%R @@ -16,18 +15,18 @@ se objevila n
-Èáast HTTP po¾adavku je chybná: +Èást HTTP po¾adavku je chybná:
-Squid stornoval po¾adavek z dùvodu pøekoroènà maximálnà délky trvánà -spojenÃ. +Squid stornoval po¾adavek z dùvodu pøekroèenà maximálnà délky trvánà spojenÃ.
diff --git a/errors/Czech/ERR_NO_RELAY b/errors/Czech/ERR_NO_RELAY index ef0f901e04..91e197b6ba 100644 --- a/errors/Czech/ERR_NO_RELAY +++ b/errors/Czech/ERR_NO_RELAY @@ -13,10 +13,10 @@ se objevila n-Cache server nema definovaný WAIS Relay! Vyhubujte administratorovi. -
\ No newline at end of file +Cache server nemá definovaný WAIS Relay! Vyhubujte administratorovi. + \ No newline at end of file diff --git a/errors/Czech/ERR_ONLY_IF_CACHED_MISS b/errors/Czech/ERR_ONLY_IF_CACHED_MISS index 19bcb3c973..c3d92f4212 100644 --- a/errors/Czech/ERR_ONLY_IF_CACHED_MISS +++ b/errors/Czech/ERR_ONLY_IF_CACHED_MISS @@ -13,15 +13,9 @@ se objevila nonly-if-cached.
+Po¾adovaný dokument se nenacházà v cachi, pøièem¾ byla specifikována direktiva
+only-if-cached.
-
-You have issued a request with a only-if-cached cache control
-directive. The document was not found in the cache, or it required
-revalidation prohibited by only-if-cached directive.
-
Pøi pokusu o pøÃstup k: %U
se objevila následujÃcà chyba: -
+
-While trying to retrieve the URL: +Pøi pokusu o pøÃstup k: %U
-The following error was encountered: +se objevila následujÃcà chyba:
-If you are making a POST or PUT request, then your request body -(the thing you are trying to upload) is too large. If you are -making a GET request, then the reply body (what you are trying -to download) is too large. These limits have been established -by the Internet Service Provider who operates this cache. Please -contact them directly if you feel this is an error.
-Your cache administrator is %w. +Pøi pou¾ità metody POST, PUT nebo GET byl po¾adovaný dokument pøÃli¹ +velký a pøekroèil mo¾ný limit povolený k pøenosu (pravdìpodobnì jste +se sna¾ili o posÃlánÃ/pøÃjem dokumentu na server nebo ze serveru). +Tento limit byl nastaven administrátorem této cache. Pokud si myslÃte, +¾e je potøeba tyto limity zmìnit, kontaktujte ho. +
\ No newline at end of file diff --git a/errors/Czech/ERR_UNSUP_REQ b/errors/Czech/ERR_UNSUP_REQ index 608b7796ec..2bc19a4856 100644 --- a/errors/Czech/ERR_UNSUP_REQ +++ b/errors/Czech/ERR_UNSUP_REQ @@ -13,12 +13,12 @@ se objevila nSquid nepodporuje v¹echny typy metod u v¹ech protokolù. Napø. nenà mo¾no -pou¾it metodu POST u slu¾by GOPHER +pou¾it metodu POST u slu¾by GOPHER.
diff --git a/errors/Czech/ERR_URN_RESOLVE b/errors/Czech/ERR_URN_RESOLVE index 9225b62ac9..0752eb4c9c 100644 --- a/errors/Czech/ERR_URN_RESOLVE +++ b/errors/Czech/ERR_URN_RESOLVE @@ -1,22 +1,22 @@ --Pøi pokusu o pøÃstup k URN: +Pøi pokusu o pøÃstup k URN: %U
se objevila následujÃcà chyba:
-Hey, don't expect too much from URNs on %T :) +Hey, neoèekáváte pøÃli¹ mnoho od URN na %T :)
diff --git a/errors/Czech/ERR_WRITE_ERROR b/errors/Czech/ERR_WRITE_ERROR index 56d3af4c19..8c889906fd 100644 --- a/errors/Czech/ERR_WRITE_ERROR +++ b/errors/Czech/ERR_WRITE_ERROR @@ -13,7 +13,7 @@ se objevila n%E
-Chyba zápisu na sû. Opakjte prosÃm po¾adavek. +Chyba zápisu na sû. Opakujte prosÃm po¾adavek.
diff --git a/errors/Czech/ERR_ZERO_SIZE_OBJECT b/errors/Czech/ERR_ZERO_SIZE_OBJECT index 164337c1b7..9b708524ec 100644 --- a/errors/Czech/ERR_ZERO_SIZE_OBJECT +++ b/errors/Czech/ERR_ZERO_SIZE_OBJECT @@ -13,10 +13,10 @@ se objevila n-Squid neobdr¾el v opdovìdi na tento dotaz ¾ádná data. +Squid neobdr¾el v odpovìdi na tento dotaz ¾ádná data.
diff --git a/errors/Czech/README b/errors/Czech/README index 055ab1618c..d8ababf0fd 100644 --- a/errors/Czech/README +++ b/errors/Czech/README @@ -1,2 +1,2 @@ -Thank you to Jakub Nantl-An FTP authentication failure occurred -while trying to retrieve the URL: +Une erreur d'authentification sur un FTP a eu lieu. +En tentant de charger l'URL: %U
-Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:
-and then received this reply +et a recu en réponse%f
%F
diff --git a/errors/French/ERR_FTP_NOT_FOUND b/errors/French/ERR_FTP_NOT_FOUND index a9b3f04406..86b14309ca 100644 --- a/errors/French/ERR_FTP_NOT_FOUND +++ b/errors/French/ERR_FTP_NOT_FOUND @@ -1,21 +1,22 @@ -%g
-The following URL could not be retrieved: +L'URL suivante n'a pu être chargée: %U
-Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:
-and then received this reply +et a recu en retour:%f
%F
%g
-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Ceci pourrait etre causé par une URL de FTP avec un +chemin absolu (ce qui n'est pas conforme au RFC 1738) . Si c'est +effectivement le cas, alors le fichier se trouve à +l'adresse %B.
diff --git a/errors/French/ERR_FTP_UNAVAILABLE b/errors/French/ERR_FTP_UNAVAILABLE index d330368f3b..6d0e2c659a 100644 --- a/errors/French/ERR_FTP_UNAVAILABLE +++ b/errors/French/ERR_FTP_UNAVAILABLE @@ -1,16 +1,16 @@ --The FTP server was too busy while trying to retrieve the URL: +Le serveur FTP était trop encombré pour charger l'URL: %U
-Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:
-and then received this reply +et a recu la réponse suivante en retour:%f
%F
diff --git a/errors/French/ERR_SHUTTING_DOWN b/errors/French/ERR_SHUTTING_DOWN index 03ca2ceb0c..b9314cfaec 100644 --- a/errors/French/ERR_SHUTTING_DOWN +++ b/errors/French/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -%g
-En tentant de résoudre l'URL: +En essayant de charger l'URL: %U
-L'erreur suivante a été rencontrée: +L'erreur suivante fut rencontrée:
-La mise hors-service de ce cache est en cours et il est impossible -de traiter votre requete actuellement. Veuillez -réitérer votre requete ultérieurement. +Ce cache est en cours de mise hors-service temporaire et il +lui est impossible de satisfaire votre requete actuellement. +Veuillez réiterer votre requete ultérieurement.
diff --git a/errors/Italian/ERR_FTP_FORBIDDEN b/errors/Italian/ERR_FTP_FORBIDDEN index 5f1b5e9843..825aa19f2e 100644 --- a/errors/Italian/ERR_FTP_FORBIDDEN +++ b/errors/Italian/ERR_FTP_FORBIDDEN @@ -1,17 +1,17 @@ --An FTP authentication failure occurred -while trying to retrieve the URL: +Un errore di autenticazione tramite FTP è avvenuto +mentre si cercava di recuperare la URL: %U
-Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:
-and then received this reply +e ha ricevuto la seguente risposta:%f
%F
diff --git a/errors/Italian/ERR_FTP_NOT_FOUND b/errors/Italian/ERR_FTP_NOT_FOUND index a9b3f04406..82163a587e 100644 --- a/errors/Italian/ERR_FTP_NOT_FOUND +++ b/errors/Italian/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -%g
-The following URL could not be retrieved: +La seguente URL non può essere recuperata: %U
-Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:
-and then received this reply +e ha ricevuto la risposta:%f
%F
%g
-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Questo potrebbe essere dovuto ad una URL FTP con un percorso assoluto +(che non è conforme al RFC 1738). In tal caso, si può +recuperare il file in %B.
diff --git a/errors/Italian/ERR_FTP_UNAVAILABLE b/errors/Italian/ERR_FTP_UNAVAILABLE index d330368f3b..6ca6fa3c8d 100644 --- a/errors/Italian/ERR_FTP_UNAVAILABLE +++ b/errors/Italian/ERR_FTP_UNAVAILABLE @@ -1,16 +1,16 @@ --The FTP server was too busy while trying to retrieve the URL: +Il server FTP era troppo occupato al momento di recuperare la URL: %U
-Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:
-and then received this reply +e ha ricevuto la seguente risposta:%f
%F
diff --git a/errors/Italian/ERR_SHUTTING_DOWN b/errors/Italian/ERR_SHUTTING_DOWN index 3cc9afaa05..3d4b4843ad 100644 --- a/errors/Italian/ERR_SHUTTING_DOWN +++ b/errors/Italian/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -%g
-While trying to retrieve the URL: +Cercando di recuperare la URL: %U
-The following error was encountered: +È occorso il seguente errore:
-This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. +Questa cache è in fase di shutdown e non può +provvedere alla vostra richiesta in questo momento. Si prega +di riprovare più tardi.
diff --git a/errors/Italian/README b/errors/Italian/README index 1459496602..649ec42af2 100644 --- a/errors/Italian/README +++ b/errors/Italian/README @@ -1,2 +1,2 @@ -Thank you to Alessio Bragadini-An FTP authentication failure occurred -while trying to retrieve the URL: +Pri pokuse zÃska» URL sa vyskytla chyba autentifikácie: %U
-Squid sent the following FTP command: +Squid zaslal nasledujúci FTP prÃkaz:
-and then received this reply +a obdr¾al nasledovnú opoveï:%f
%F
diff --git a/errors/Slovak/ERR_FTP_NOT_FOUND b/errors/Slovak/ERR_FTP_NOT_FOUND index a9b3f04406..3b47dca803 100644 --- a/errors/Slovak/ERR_FTP_NOT_FOUND +++ b/errors/Slovak/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -%g
-The following URL could not be retrieved: +Nasledovné URL je nedostupné: %U
-Squid sent the following FTP command: +Squid zaslal nasledujúci FTP prÃkaz:
-and then received this reply +a obdr¾al nasledovnú opoveï:%f
%F
%g
-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +To mô¾e by» spôsobené uvedenÃm absolútnej cesty v FTP URL (èo odporuje RFC +1738). V tomto prÃpade by ste dokument mohli nájs» na %B.
diff --git a/errors/Slovak/ERR_FTP_UNAVAILABLE b/errors/Slovak/ERR_FTP_UNAVAILABLE index d330368f3b..f99f13146d 100644 --- a/errors/Slovak/ERR_FTP_UNAVAILABLE +++ b/errors/Slovak/ERR_FTP_UNAVAILABLE @@ -1,16 +1,17 @@ --The FTP server was too busy while trying to retrieve the URL: +FTP server bol prÃli¹ zaneprázdnený pri pokuse zÃskat nasledovné URL: %U
-Squid sent the following FTP command: +Squid zaslal nasledujúci FTP prÃkaz:
-and then received this reply +a obdr¾al nasledovnú opoveï:%f
%F
diff --git a/errors/Slovak/ERR_SHUTTING_DOWN b/errors/Slovak/ERR_SHUTTING_DOWN index 3cc9afaa05..0bcf81d3de 100644 --- a/errors/Slovak/ERR_SHUTTING_DOWN +++ b/errors/Slovak/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -%g
-While trying to retrieve the URL: +Pri pokuse o prÃstup k: %U
-The following error was encountered: +do¹lo k nasledovnej chybe:
-This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. +Táto cache sa práve vypÃna a nemô¾e spracova» Va¹u po¾iadavku. Opakujte +prosÃm svoju po¾iadavku za nejaký èas.
diff --git a/errors/Spanish/ERR_ACCESS_DENIED b/errors/Spanish/ERR_ACCESS_DENIED index f67fd77ca7..c34371b460 100644 --- a/errors/Spanish/ERR_ACCESS_DENIED +++ b/errors/Spanish/ERR_ACCESS_DENIED @@ -15,8 +15,8 @@ Ha ocurrido el siguiente problema: Acceso Denegado.-Las reglas de control de acceso impiden que su petición sea +Las reglas de control de acceso impiden que su petición sea permitida en este momento. Contacte con su proveedor de servicios si cree que esto es incorrecto. -
+Mientras se intentaba traer el URL: +%U +
+Ha ocurrido el siguiente problema: +
+Disculpe, Ud. no está autorizado a acceder a: +
%U+desde este cache hasta que se haya autenticado. + + +
+Ud. necesita utilizar Netscape version 2.0 o superior, o Microsoft Internet +Explorer 3.0, o un navegador que cumpla con HTTP/1.1 para que funcione. +Por favor contacte al administrador del cache si +tiene dificultad para autenticarse o +cambie su password. +
diff --git a/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED b/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED new file mode 100644 index 0000000000..dcd0d1d911 --- /dev/null +++ b/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED @@ -0,0 +1,31 @@ + ++Mientras se intentaba traer el URL: +%U +
+Ha ocurrido el siguiente problema: +
Disculpe, Ud. no está autorizado a acceder a: +
%U+desde este cache hasta que se haya autenticado. + +
+Ud. necesita utilizar Netscape version 2.0 o superior, o Microsoft Internet +Explorer 3.0, o un navegador que cumpla con HTTP/1.1 para que funcione. +Por favor contacte al administrador del cache si +tiene dificultad para autenticarse, o si Ud. es el administrador, +lea la documentación de Squid sobre interfaz del cache manager y +chequee en el log de cache mensajes de error más detallados. +
diff --git a/errors/Spanish/ERR_CANNOT_FORWARD b/errors/Spanish/ERR_CANNOT_FORWARD index 3ff3b090d4..e88dfc1029 100644 --- a/errors/Spanish/ERR_CANNOT_FORWARD +++ b/errors/Spanish/ERR_CANNOT_FORWARD @@ -12,18 +12,18 @@ Ha ocurrido el siguiente problema:-La petición no ha podido ser enviada al servidor origen o a alguna -de las cachés padres. Las razones más probables de que haya ocurrido -este error son: +La petición no ha podido ser enviada al servidor origen o a alguna +de las cachés padres. Las razones más probables de que haya +ocurrido este error son:
El equipo remoto o la red pueden estar fuera de servicio. -Por favor, intente de nuevo la petición en otro momento. +Por favor, intente de nuevo la petición. diff --git a/errors/Spanish/ERR_DNS_FAIL b/errors/Spanish/ERR_DNS_FAIL index 33668081a0..290127187b 100644 --- a/errors/Spanish/ERR_DNS_FAIL +++ b/errors/Spanish/ERR_DNS_FAIL @@ -9,11 +9,10 @@ Mientras se intentaba traer el URL:
Ha ocurrido el siguiente problema:
-Incapaz de determinar la dirección IP a partir +Incapaz de determinar la dirección IP a partir del nombre de la máquina: %H-
El programa dnsserver ha devuelto el siguiente mensaje: @@ -24,9 +23,8 @@ El programa dnsserver ha devuelto el siguiente mensaje:
Esto significa que:
- La caché no ha sido capaz de resolver el nombre de máquina + La caché no ha sido capaz de resolver el nombre de máquina presente en la URL. - Compruebe que la dirección sea correcta. + Compruebe que la dirección sea correcta.- diff --git a/errors/Spanish/ERR_FORWARDING_DENIED b/errors/Spanish/ERR_FORWARDING_DENIED index 027fe7c882..c689582f1f 100644 --- a/errors/Spanish/ERR_FORWARDING_DENIED +++ b/errors/Spanish/ERR_FORWARDING_DENIED @@ -12,12 +12,12 @@ Ha ocurrido el siguiente problema:
-Esta caché no permite reenviar su petición porque trata de obligar a -mantener una relación de hermandad. -Quizás el cliente en %i es una caché que ha sido mal configurada. +Esta caché no permite reenviar su petición porque trata de +obligar a mantener una relación de hermandad. +Quizás el cliente en %i es una caché que ha sido mal configurada.
diff --git a/errors/Spanish/ERR_FTP_DISABLED b/errors/Spanish/ERR_FTP_DISABLED index 34e0c3b1fa..12d061c03f 100644 --- a/errors/Spanish/ERR_FTP_DISABLED +++ b/errors/Spanish/ERR_FTP_DISABLED @@ -17,5 +17,5 @@ Servicio FTP deshabilitado-Esta caché no proporciona servicio caché para el protocolo FTP. +Esta caché no proporciona servicio caché para el protocolo FTP.
diff --git a/errors/Spanish/ERR_FTP_FAILURE b/errors/Spanish/ERR_FTP_FAILURE index 5d2fa4ebc6..210a2dfb64 100644 --- a/errors/Spanish/ERR_FTP_FAILURE +++ b/errors/Spanish/ERR_FTP_FAILURE @@ -5,17 +5,13 @@-Ha ocurrido un error de protocolo FTP: - +Ha ocurrido un error de protocolo FTP mientras se intentaba traer el documento con URL: %U
-Squid envió la siguiente orden FTP: +Squid envió la siguiente orden FTP:
-y recibió la siguiente respuesta: +y recibió la siguiente respuesta:%f
%F
%g
-Esto puede ser causado por una URL de protocolo FTP con una ruta -de directorios absoluto(que no cumple el RFC 1738). Si esta es la -causa, el fichero podrÃa encontrarse en %B. diff --git a/errors/Spanish/ERR_FTP_FORBIDDEN b/errors/Spanish/ERR_FTP_FORBIDDEN index 5f1b5e9843..a7e71c943c 100644 --- a/errors/Spanish/ERR_FTP_FORBIDDEN +++ b/errors/Spanish/ERR_FTP_FORBIDDEN @@ -1,18 +1,18 @@
--An FTP authentication failure occurred -while trying to retrieve the URL: +Ha ocurrido una falla de autenticación cuando se +trataba de conseguir el URL: %U
-Squid sent the following FTP command: -
-and then received this reply -%f
-%F
- +Squid envió el siguiente comando FTP: +%g
+y recibió esta respuesta +%f
+%F
+ diff --git a/errors/Spanish/ERR_FTP_NOT_FOUND b/errors/Spanish/ERR_FTP_NOT_FOUND index a9b3f04406..9e73edc423 100644 --- a/errors/Spanish/ERR_FTP_NOT_FOUND +++ b/errors/Spanish/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -%g
-The following URL could not be retrieved: +El siguiente URL no pudo ser obtenido: %U
-Squid sent the following FTP command: -
-and then received this reply -%f
-%F
+Squid envió el siguiente comando FTP: +%g
+y recibió esta respuesta +%f
+%F
%g
-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Esto puede ser causado por un URL FTP con un camino absoluto (y por +lo tanto no cumple con RFC 1738). Si este es la causa, entonces el +archivo puede ser obtenido en %B.
diff --git a/errors/Spanish/ERR_FTP_PUT_CREATED b/errors/Spanish/ERR_FTP_PUT_CREATED new file mode 100644 index 0000000000..860151d5f9 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_CREATED @@ -0,0 +1,9 @@ + ++
+ diff --git a/errors/Spanish/ERR_FTP_PUT_ERROR b/errors/Spanish/ERR_FTP_PUT_ERROR new file mode 100644 index 0000000000..296b0db5d3 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_ERROR @@ -0,0 +1,24 @@ + ++Mientras se intentaba hacer un PUT del siguiente URL: +%U +
+Squid envió el siguiente comando FTP: +
+ %f ++y recibió esta respuesta +
+ %F ++
+Esto significa que: +
+Chequee el camino, los permisos, espacio en disco e intente nuevamente. ++ diff --git a/errors/Spanish/ERR_FTP_PUT_MODIFIED b/errors/Spanish/ERR_FTP_PUT_MODIFIED new file mode 100644 index 0000000000..0d61f644c3 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_MODIFIED @@ -0,0 +1,9 @@ + +
+
+ diff --git a/errors/Spanish/ERR_FTP_UNAVAILABLE b/errors/Spanish/ERR_FTP_UNAVAILABLE index d330368f3b..70dbfa072e 100644 --- a/errors/Spanish/ERR_FTP_UNAVAILABLE +++ b/errors/Spanish/ERR_FTP_UNAVAILABLE @@ -1,17 +1,17 @@ --The FTP server was too busy while trying to retrieve the URL: +El servidor FTP estaba muy ocupado cuando se intentaba obtener el URL: %U
-Squid sent the following FTP command: -
-and then received this reply -%f
-%F
- +Squid envió el siguiente comando FTP: +%g
+y recibió esta respuesta +%f
+%F
+ diff --git a/errors/Spanish/ERR_INVALID_REQ b/errors/Spanish/ERR_INVALID_REQ index 7f3974985c..4d801c2c68 100644 --- a/errors/Spanish/ERR_INVALID_REQ +++ b/errors/Spanish/ERR_INVALID_REQ @@ -14,19 +14,21 @@ Ha ocurrido el siguiente problema:%g
-Algún aspecto de la petición HTTP no es válido. Posibles problemas: +Algún aspecto de la petición HTTP no es válido. +Posibles problemas:
-Algún aspecto del URL solicitado es incorrecto. Posibles problemas: +Algún aspecto del URL solicitado es incorrecto. Posibles problemas:
-Squid ha dado por terminada la petición porque se ha excedido -el tiempo de vida máximo para una conexión. +Squid ha dado por terminada la petición porque se ha excedido +el tiempo de vida máximo para una conexión.
diff --git a/errors/Spanish/ERR_NO_RELAY b/errors/Spanish/ERR_NO_RELAY index da980c8ce3..f12c124bd7 100644 --- a/errors/Spanish/ERR_NO_RELAY +++ b/errors/Spanish/ERR_NO_RELAY @@ -17,5 +17,5 @@ No hay una pasarela para protocolo Wais.-Esta caché no tiene definido ninguna pasarela para el protocolo WAIS !! -¡GrÃteselo al administrador de la caché ! +Esta caché no tiene definido ninguna pasarela para el protocolo WAIS !! +¡Gríteselo al administrador de la caché ! diff --git a/errors/Spanish/ERR_ONLY_IF_CACHED_MISS b/errors/Spanish/ERR_ONLY_IF_CACHED_MISS new file mode 100644 index 0000000000..3f7500f909 --- /dev/null +++ b/errors/Spanish/ERR_ONLY_IF_CACHED_MISS @@ -0,0 +1,27 @@ +
++Mientras se intentaba traer el URL: +%U +
+Ha ocurrido el siguiente problema: +
only-if-cached.
+
+
+
+Ud. ha enviado una solicitud con la directiva de control de la caché
+only-if-cached. El documento no fue encontrado en la caché,
+o requiere revalidación prohibida por la directiva
+only-if-cached.
+
+
El sistema ha devuelto el siguiente mensaje:
%E-
-Ha ocurrido algún problema mientras se leÃan datos de la red. -Por favor, inténtelo de nuevo. +Ha ocurrido algún problema mientras se leían datos de la red. +Por favor, inténtelo de nuevo.
diff --git a/errors/Spanish/ERR_READ_TIMEOUT b/errors/Spanish/ERR_READ_TIMEOUT index d1358d6514..a666a31be8 100644 --- a/errors/Spanish/ERR_READ_TIMEOUT +++ b/errors/Spanish/ERR_READ_TIMEOUT @@ -12,7 +12,7 @@ Ha ocurrido el siguiente problema:%E
-Se acabó el tiempo máximo de lectura mientras se leÃan datos de la red. -La red o el servidor pueden estar congestionados. Por favor, inténtelo de nuevo +Se acabó el tiempo máximo de lectura mientras se leían +datos de la red. La red o el servidor pueden estar congestionados. Por +favor, inténtelo de nuevo
diff --git a/errors/Spanish/ERR_SHUTTING_DOWN b/errors/Spanish/ERR_SHUTTING_DOWN index 3cc9afaa05..63de81a0ac 100644 --- a/errors/Spanish/ERR_SHUTTING_DOWN +++ b/errors/Spanish/ERR_SHUTTING_DOWN @@ -1,17 +1,18 @@ --While trying to retrieve the URL: +Mientras se intentaba traer el URL: %U
-The following error was encountered: +Ha ocurrido el siguiente problema:
-This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. -
+ +Esta caché esta siendo desactivado y no puede atender su +solicitud en este momento. Por favor reintente su solicitud +nuevamente más tarde. + +Squid ha sido incapaz de crear un puerto TCP, posiblemente debido -al exceso de carga. Inténtelo de nuevo, por favor. +al exceso de carga. Inténtelo de nuevo, por favor. diff --git a/errors/Spanish/ERR_UNSUP_REQ b/errors/Spanish/ERR_UNSUP_REQ index db599d0ca5..b768b70fe5 100644 --- a/errors/Spanish/ERR_UNSUP_REQ +++ b/errors/Spanish/ERR_UNSUP_REQ @@ -12,10 +12,10 @@ Ha ocurrido el siguiente problema:
-Squid no admite todos los métodos para todos los protocolos de acceso. +Squid no admite todos los métodos para todos los protocolos de acceso. Por ejemplo, no se puede hacer un POST a un servidor Gopher. diff --git a/errors/Spanish/ERR_URN_RESOLVE b/errors/Spanish/ERR_URN_RESOLVE index 1a12458606..d01c90dcca 100644 --- a/errors/Spanish/ERR_URN_RESOLVE +++ b/errors/Spanish/ERR_URN_RESOLVE @@ -17,5 +17,5 @@ No se puede resolver el URN
-Hey, no espere mucho sobre URNs en %T :) +Hey, no espere mucho de URNs en %T :)
diff --git a/errors/Spanish/ERR_WRITE_ERROR b/errors/Spanish/ERR_WRITE_ERROR index 712dbbaa77..dcda5151e0 100644 --- a/errors/Spanish/ERR_WRITE_ERROR +++ b/errors/Spanish/ERR_WRITE_ERROR @@ -21,5 +21,5 @@ El sistema ha devuelto el siguiente mensaje:%E
-Se ha producido un error mientras se escribÃan datos en la red. -Por favor, inténtelo de nuevo. +Se ha producido un error mientras se escribían datos en la red. +Por favor, inténtelo de nuevo. diff --git a/errors/Spanish/ERR_ZERO_SIZE_OBJECT b/errors/Spanish/ERR_ZERO_SIZE_OBJECT index 6256c92683..a5535ffe54 100644 --- a/errors/Spanish/ERR_ZERO_SIZE_OBJECT +++ b/errors/Spanish/ERR_ZERO_SIZE_OBJECT @@ -12,10 +12,11 @@ Ha ocurrido el siguiente problema:
-Squid no ha recibido ninguna información en respuesta a esta petición. +Squid no ha recibido ninguna información en respuesta a esta +petición.
diff --git a/errors/Spanish/README b/errors/Spanish/README index 051b3f0299..163e8d56e4 100644 --- a/errors/Spanish/README +++ b/errors/Spanish/README @@ -1,5 +1,3 @@ -Thanks to Javier Puche\n");
@@ -697,7 +693,7 @@ ftpHtmlifyListEntry(char *line, FtpStateData * ftpState)
}
}
/* {icon} {text} . . . {date}{size}{chdir}{view}{download}{link}\n */
- xstrncpy(href, rfc1738_escape(parts->name), 2048);
+ xstrncpy(href, rfc1738_escape_part(parts->name), 2048);
xstrncpy(text, parts->showname, 2048);
switch (parts->type) {
case 'd':
@@ -831,11 +827,13 @@ ftpReadComplete(FtpStateData * ftpState)
{
debug(9, 3) ("ftpReadComplete\n");
/* Connection closed; retrieval done. */
- if (ftpState->flags.html_header_sent)
- ftpListingFinish(ftpState);
- if (!ftpState->flags.put) {
- storeTimestampsSet(ftpState->entry);
- fwdComplete(ftpState->fwd);
+ if (ftpState->data.fd > -1) {
+ /*
+ * close data socket so it does not occupy resources while
+ * we wait
+ */
+ comm_close(ftpState->data.fd);
+ ftpState->data.fd = -1;
}
/* expect the "transfer complete" message on the control socket */
ftpScheduleReadControlReply(ftpState, 1);
@@ -895,8 +893,9 @@ ftpDataRead(int fd, void *data)
data,
Config.Timeout.read);
} else {
- assert(mem->inmem_hi > 0);
- ftpDataTransferDone(ftpState);
+ ftpFailed(ftpState, ERR_READ_ERROR);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+ return;
}
} else if (len == 0) {
ftpReadComplete(ftpState);
@@ -930,12 +929,10 @@ ftpCheckAuth(FtpStateData * ftpState, const HttpHeader * req_hdr)
char *orig_user;
const char *auth;
ftpLoginParser(ftpState->request->login, ftpState, FTP_LOGIN_ESCAPED);
- if (ftpState->user[0] && ftpState->password[0])
- return 1; /* name and passwd both in URL */
- if (!ftpState->user[0] && !ftpState->password[0])
- return 1; /* no name or passwd */
- if (ftpState->password[0])
- return 1; /* passwd with no name? */
+ if (!ftpState->user[0])
+ return 1; /* no name */
+ if (ftpState->password_url || ftpState->password[0])
+ return 1; /* passwd provided in URL */
/* URL has name, but no passwd */
if (!(auth = httpHeaderGetAuth(req_hdr, HDR_AUTHORIZATION, "Basic")))
return 0; /* need auth header */
@@ -967,7 +964,6 @@ ftpCheckUrlpath(FtpStateData * ftpState)
ftpState->flags.use_base = 1;
/* check for null path */
if (!l) {
- stringReset(&request->urlpath, "/");
ftpState->flags.isdir = 1;
ftpState->flags.root_dir = 1;
} else if (!strCmp(request->urlpath, "/%2f/")) {
@@ -1005,6 +1001,21 @@ ftpBuildTitleUrl(FtpStateData * ftpState)
if (request->port != urlDefaultPort(PROTO_FTP))
snprintf(&t[strlen(t)], len - strlen(t), ":%d", request->port);
strcat(t, strBuf(request->urlpath));
+ t = ftpState->base_href = xcalloc(len, 1);
+ strcat(t, "ftp://");
+ if (strcmp(ftpState->user, "anonymous")) {
+ strcat(t, rfc1738_escape_part(ftpState->user));
+ if (ftpState->password_url) {
+ strcat(t, ":");
+ strcat(t, rfc1738_escape_part(ftpState->password));
+ }
+ strcat(t, "@");
+ }
+ strcat(t, request->host);
+ if (request->port != urlDefaultPort(PROTO_FTP))
+ snprintf(&t[strlen(t)], len - strlen(t), ":%d", request->port);
+ strcat(t, strBuf(request->urlpath));
+ strcat(t, "/");
}
void
@@ -1030,7 +1041,7 @@ ftpStart(FwdState * fwd)
ftpState->data.fd = -1;
ftpState->size = -1;
ftpState->mdtm = -1;
- ftpState->flags.pasv_supported = 1;
+ ftpState->flags.pasv_supported = !fwd->flags.ftp_pasv_failed;
ftpState->flags.rest_supported = 1;
ftpState->fwd = fwd;
comm_add_close_handler(fd, ftpStateFree, ftpState);
@@ -1083,6 +1094,7 @@ ftpWriteCommand(const char *buf, FtpStateData * ftpState)
{
debug(9, 5) ("ftpWriteCommand: %s\n", buf);
safe_free(ftpState->ctrl.last_command);
+ safe_free(ftpState->ctrl.last_reply);
ftpState->ctrl.last_command = xstrdup(buf);
comm_write(ftpState->ctrl.fd,
xstrdup(buf),
@@ -1097,8 +1109,6 @@ static void
ftpWriteCommandCallback(int fd, char *bufnotused, size_t size, int errflag, void *data)
{
FtpStateData *ftpState = data;
- StoreEntry *entry = ftpState->entry;
- ErrorState *err;
debug(9, 7) ("ftpWriteCommandCallback: wrote %d bytes\n", size);
if (size > 0) {
fd_bytes(fd, size, FD_WRITE);
@@ -1109,13 +1119,9 @@ ftpWriteCommandCallback(int fd, char *bufnotused, size_t size, int errflag, void
return;
if (errflag) {
debug(50, 1) ("ftpWriteCommandCallback: FD %d: %s\n", fd, xstrerror());
- if (entry->mem_obj->inmem_hi == 0) {
- err = errorCon(ERR_WRITE_ERROR, HTTP_SERVICE_UNAVAILABLE);
- err->xerrno = errno;
- err->request = requestLink(ftpState->request);
- errorAppendEntry(entry, err);
- }
- comm_close(ftpState->ctrl.fd);
+ ftpFailed(ftpState, ERR_WRITE_ERROR);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+ return;
}
}
@@ -1206,8 +1212,11 @@ ftpReadControlReply(int fd, void *data)
FtpStateData *ftpState = data;
StoreEntry *entry = ftpState->entry;
int len;
- ErrorState *err;
debug(9, 5) ("ftpReadControlReply\n");
+ if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
+ comm_close(ftpState->ctrl.fd);
+ return;
+ }
assert(ftpState->ctrl.offset < ftpState->ctrl.size);
Counter.syscalls.sock.reads++;
len = read(fd,
@@ -1224,26 +1233,17 @@ ftpReadControlReply(int fd, void *data)
if (ignoreErrno(errno)) {
ftpScheduleReadControlReply(ftpState, 0);
} else {
- if (entry->mem_obj->inmem_hi == 0) {
- err = errorCon(ERR_READ_ERROR, HTTP_INTERNAL_SERVER_ERROR);
- err->xerrno = errno;
- err->request = requestLink(ftpState->request);
- errorAppendEntry(entry, err);
- }
- comm_close(ftpState->ctrl.fd);
+ ftpFailed(ftpState, ERR_READ_ERROR);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+ return;
}
return;
}
if (len == 0) {
if (entry->store_status == STORE_PENDING) {
- storeReleaseRequest(entry);
- if (entry->mem_obj->inmem_hi == 0) {
- err = errorCon(ERR_FTP_FAILURE, HTTP_INTERNAL_SERVER_ERROR);
- err->xerrno = 0;
- err->request = requestLink(ftpState->request);
- err->ftp_server_msg = ftpState->ctrl.message;
- errorAppendEntry(entry, err);
- }
+ ftpFailed(ftpState, ERR_FTP_FAILURE);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+ return;
}
comm_close(ftpState->ctrl.fd);
return;
@@ -1304,6 +1304,8 @@ ftpReadWelcome(FtpStateData * ftpState)
debug(9, 3) ("ftpReadWelcome\n");
if (ftpState->flags.pasv_only)
ftpState->login_att++;
+ /* Dont retry if the FTP server accepted the connection */
+ ftpState->fwd->flags.dont_retry = 1;
if (code == 220) {
if (ftpState->ctrl.message) {
if (strstr(ftpState->ctrl.message->key, "NetWare"))
@@ -1721,6 +1723,9 @@ ftpReadPasv(FtpStateData * ftpState)
debug(9, 5) ("ftpReadPasv: connecting to %s, port %d\n", junk, port);
ftpState->data.port = port;
ftpState->data.host = xstrdup(junk);
+ safe_free(ftpState->ctrl.last_command);
+ safe_free(ftpState->ctrl.last_reply);
+ ftpState->ctrl.last_command = xstrdup("Connect to server data port");
commConnectStart(fd, junk, port, ftpPasvCallback, ftpState);
}
@@ -1728,17 +1733,13 @@ static void
ftpPasvCallback(int fd, int status, void *data)
{
FtpStateData *ftpState = data;
- request_t *request = ftpState->request;
- ErrorState *err;
debug(9, 3) ("ftpPasvCallback\n");
if (status != COMM_OK) {
- err = errorCon(ERR_CONNECT_FAIL, HTTP_SERVICE_UNAVAILABLE);
- err->xerrno = errno;
- err->host = xstrdup(ftpState->data.host);
- err->port = ftpState->data.port;
- err->request = requestLink(request);
- errorAppendEntry(ftpState->entry, err);
- comm_close(ftpState->ctrl.fd);
+ debug(9, 2) ("ftpPasvCallback: failed to connect. Retrying without PASV.\n");
+ ftpState->fwd->flags.dont_retry = 0; /* this is a retryable error */
+ ftpState->fwd->flags.ftp_pasv_failed = 1;
+ ftpFailed(ftpState, ERR_NONE);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
return;
}
ftpRestOrList(ftpState);
@@ -1868,7 +1869,6 @@ ftpAcceptDataConnection(int fd, void *data)
static void
ftpRestOrList(FtpStateData * ftpState)
{
-
debug(9, 3) ("This is ftpRestOrList\n");
if (ftpState->flags.put) {
debug(9, 3) ("ftpRestOrList: Sending STOR request...\n");
@@ -1889,10 +1889,20 @@ ftpRestOrList(FtpStateData * ftpState)
static void
ftpSendStor(FtpStateData * ftpState)
{
- assert(ftpState->filepath != NULL);
- snprintf(cbuf, 1024, "STOR %s\r\n", ftpState->filepath);
- ftpWriteCommand(cbuf, ftpState);
- ftpState->state = SENT_STOR;
+ if (ftpState->filepath != NULL) {
+ /* Plain file upload */
+ snprintf(cbuf, 1024, "STOR %s\r\n", ftpState->filepath);
+ ftpWriteCommand(cbuf, ftpState);
+ ftpState->state = SENT_STOR;
+ } else if (httpHeaderGetInt(&ftpState->request->header, HDR_CONTENT_LENGTH) > 0) {
+ /* File upload without a filename. use STOU to generate one */
+ snprintf(cbuf, 1024, "STOU\r\n");
+ ftpWriteCommand(cbuf, ftpState);
+ ftpState->state = SENT_STOR;
+ } else {
+ /* No file to transfer. Only create directories if needed */
+ ftpSendReply(ftpState);
+ }
}
static void
@@ -1900,7 +1910,9 @@ ftpReadStor(FtpStateData * ftpState)
{
int code = ftpState->ctrl.replycode;
debug(9, 3) ("This is ftpReadStor\n");
- if (code >= 100 && code < 200) {
+ if (code == 125 || (code == 150 && ftpState->data.host)) {
+ /* Begin data transfer */
+ debug(9, 3) ("ftpReadStor: starting data transfer\n");
/*
* Cancel the timeout on the Control socket, pumpStart will
* establish one on the data socket.
@@ -1909,15 +1921,17 @@ ftpReadStor(FtpStateData * ftpState)
ftpPutStart(ftpState);
debug(9, 3) ("ftpReadStor: writing data channel\n");
ftpState->state = WRITING_DATA;
- } else if (code == 553) {
- /* directory does not exist, have to create, sigh */
-#if WORK_IN_PROGRESS
- ftpTraverseDirectory(ftpState);
-#endif
- ftpSendReply(ftpState);
+ } else if (code == 150) {
+ /* Accept data channel */
+ debug(9, 3) ("ftpReadStor: accepting data channel\n");
+ commSetSelect(ftpState->data.fd,
+ COMM_SELECT_READ,
+ ftpAcceptDataConnection,
+ ftpState,
+ 0);
} else {
- debug(9, 3) ("ftpReadStor: that's all folks\n");
- ftpSendReply(ftpState);
+ debug(9, 3) ("ftpReadStor: Unexpected reply code %s\n", code);
+ ftpFail(ftpState);
}
}
@@ -2099,13 +2113,22 @@ ftpReadTransferDone(FtpStateData * ftpState)
{
int code = ftpState->ctrl.replycode;
debug(9, 3) ("This is ftpReadTransferDone\n");
- if (code != 226) {
+ if (code == 226) {
+ /* Connection closed; retrieval done. */
+ if (ftpState->flags.html_header_sent)
+ ftpListingFinish(ftpState);
+ if (!ftpState->flags.put) {
+ storeTimestampsSet(ftpState->entry);
+ fwdComplete(ftpState->fwd);
+ }
+ ftpDataTransferDone(ftpState);
+ } else { /* != 226 */
debug(9, 1) ("ftpReadTransferDone: Got code %d after reading data\n",
code);
- debug(9, 1) ("--> releasing '%s'\n", storeUrl(ftpState->entry));
- storeReleaseRequest(ftpState->entry);
+ ftpFailed(ftpState, ERR_FTP_FAILURE);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+ return;
}
- ftpDataTransferDone(ftpState);
}
static void
@@ -2204,7 +2227,6 @@ ftpHackShortcut(FtpStateData * ftpState, FTPSM * nextState)
static void
ftpFail(FtpStateData * ftpState)
{
- ErrorState *err;
debug(9, 3) ("ftpFail\n");
/* Try the / hack to support "Netscape" FTP URL's for retreiving files */
if (!ftpState->flags.isdir && /* Not a directory */
@@ -2235,45 +2257,80 @@ ftpFail(FtpStateData * ftpState)
break;
}
}
+ ftpFailed(ftpState, ERR_NONE);
+ /* ftpFailed closes ctrl.fd and frees ftpState */
+}
+
+static void
+ftpFailed(FtpStateData * ftpState, err_type error)
+{
+ StoreEntry *entry = ftpState->entry;
+ if (entry->mem_obj->inmem_hi == 0)
+ ftpFailedErrorMessage(ftpState, error);
+ if (ftpState->data.fd > -1) {
+ comm_close(ftpState->data.fd);
+ ftpState->data.fd = -1;
+ }
+ comm_close(ftpState->ctrl.fd);
+}
+
+static void
+ftpFailedErrorMessage(FtpStateData * ftpState, err_type error)
+{
+ ErrorState *err;
+ char *command, *reply;
/* Translate FTP errors into HTTP errors */
err = NULL;
- switch (ftpState->state) {
- case SENT_USER:
- case SENT_PASS:
- if (ftpState->ctrl.replycode > 500)
- err = errorCon(ERR_FTP_FORBIDDEN, HTTP_FORBIDDEN);
- else if (ftpState->ctrl.replycode == 421)
- err = errorCon(ERR_FTP_UNAVAILABLE, HTTP_SERVICE_UNAVAILABLE);
+ switch (error) {
+ case ERR_NONE:
+ switch (ftpState->state) {
+ case SENT_USER:
+ case SENT_PASS:
+ if (ftpState->ctrl.replycode > 500)
+ err = errorCon(ERR_FTP_FORBIDDEN, HTTP_FORBIDDEN);
+ else if (ftpState->ctrl.replycode == 421)
+ err = errorCon(ERR_FTP_UNAVAILABLE, HTTP_SERVICE_UNAVAILABLE);
+ break;
+ case SENT_CWD:
+ case SENT_RETR:
+ if (ftpState->ctrl.replycode == 550)
+ err = errorCon(ERR_FTP_NOT_FOUND, HTTP_NOT_FOUND);
+ break;
+ default:
+ break;
+ }
break;
- case SENT_CWD:
- case SENT_RETR:
- if (ftpState->ctrl.replycode == 550)
- err = errorCon(ERR_FTP_NOT_FOUND, HTTP_NOT_FOUND);
+ case ERR_READ_TIMEOUT:
+ err = errorCon(error, HTTP_GATEWAY_TIMEOUT);
break;
default:
+ err = errorCon(error, HTTP_BAD_GATEWAY);
break;
}
if (err == NULL)
err = errorCon(ERR_FTP_FAILURE, HTTP_BAD_GATEWAY);
+ err->xerrno = errno;
err->request = requestLink(ftpState->request);
- err->ftp_server_msg = ftpState->ctrl.message;
+ err->ftp.server_msg = ftpState->ctrl.message;
+ ftpState->ctrl.message = NULL;
if (ftpState->old_request)
- err->ftp.request = ftpState->old_request;
+ command = ftpState->old_request;
else
- err->ftp.request = ftpState->ctrl.last_command;
- if (err->ftp.request) {
- if (!strncmp(err->ftp.request, "PASS", 4))
- err->ftp.request = "PASS ";
- }
+ command = ftpState->ctrl.last_command;
+ if (command && strncmp(command, "PASS", 4) == 0)
+ command = "PASS ";
if (ftpState->old_reply)
- err->ftp.reply = ftpState->old_reply;
+ reply = ftpState->old_reply;
else
- err->ftp.reply = ftpState->ctrl.last_reply;
- errorAppendEntry(ftpState->entry, err);
- comm_close(ftpState->ctrl.fd);
+ reply = ftpState->ctrl.last_reply;
+ if (command)
+ err->ftp.request = xstrdup(command);
+ if (reply)
+ err->ftp.reply = xstrdup(reply);
+ fwdFail(ftpState->fwd, err);
}
-void
+static void
ftpPumpClosedData(int data_fd, void *data)
{
FtpStateData *ftpState = data;
@@ -2329,6 +2386,9 @@ ftpSendReply(FtpStateData * ftpState)
if (code == 226) {
err_code = (ftpState->mdtm > 0) ? ERR_FTP_PUT_MODIFIED : ERR_FTP_PUT_CREATED;
http_code = (ftpState->mdtm > 0) ? HTTP_ACCEPTED : HTTP_CREATED;
+ } else if (code == 227) {
+ err_code = ERR_FTP_PUT_CREATED;
+ http_code = HTTP_CREATED;
} else {
err_code = ERR_FTP_PUT_ERROR;
http_code = HTTP_INTERNAL_SERVER_ERROR;
@@ -2336,16 +2396,16 @@ ftpSendReply(FtpStateData * ftpState)
err = errorCon(err_code, http_code);
err->request = requestLink(ftpState->request);
if (ftpState->old_request)
- err->ftp.request = ftpState->old_request;
+ err->ftp.request = xstrdup(ftpState->old_request);
else
- err->ftp.request = ftpState->ctrl.last_command;
+ err->ftp.request = xstrdup(ftpState->ctrl.last_command);
if (ftpState->old_reply)
- err->ftp.reply = ftpState->old_reply;
+ err->ftp.reply = xstrdup(ftpState->old_reply);
else
- err->ftp.reply = ftpState->ctrl.last_reply;
+ err->ftp.reply = xstrdup(ftpState->ctrl.last_reply);
errorAppendEntry(ftpState->entry, err);
storeBufferFlush(ftpState->entry);
- comm_close(ftpState->ctrl.fd);
+ ftpSendQuit(ftpState);
}
static void
diff --git a/src/gopher.cc b/src/gopher.cc
index 9ab5b63363..4b2697eee1 100644
--- a/src/gopher.cc
+++ b/src/gopher.cc
@@ -1,7 +1,7 @@
/*
- * $Id: gopher.cc,v 1.150 1999/01/31 15:58:54 wessels Exp $
+ * $Id: gopher.cc,v 1.151 1999/10/04 05:05:13 wessels Exp $
*
* DEBUG: section 10 Gopher
* AUTHOR: Harvest Derived
@@ -430,7 +430,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
port[0] = 0; /* 0 means none */
}
/* escape a selector here */
- escaped_selector = xstrdup(rfc1738_escape(selector));
+ escaped_selector = xstrdup(rfc1738_escape_part(selector));
switch (gtype) {
case GOPHER_DIRECTORY:
diff --git a/src/htcp.cc b/src/htcp.cc
index e64a1a9c64..e51fd604a7 100644
--- a/src/htcp.cc
+++ b/src/htcp.cc
@@ -1,6 +1,6 @@
/*
- * $Id: htcp.cc,v 1.27 1999/06/10 06:10:30 wessels Exp $
+ * $Id: htcp.cc,v 1.28 1999/10/04 05:05:14 wessels Exp $
*
* DEBUG: section 31 Hypertext Caching Protocol
* AUTHOR: Duane Wesssels
@@ -365,16 +365,22 @@ htcpBuildPacket(htcpStuff * stuff, ssize_t * len)
htcpHeader hdr;
char *buf = xcalloc(buflen, 1);
/* skip the header -- we don't know the overall length */
- if (buflen < hdr_sz)
+ if (buflen < hdr_sz) {
+ xfree(buf);
return NULL;
+ }
off += hdr_sz;
s = htcpBuildData(buf + off, buflen - off, stuff);
- if (s < 0)
+ if (s < 0) {
+ xfree(buf);
return NULL;
+ }
off += s;
s = htcpBuildAuth(buf + off, buflen - off);
- if (s < 0)
+ if (s < 0) {
+ xfree(buf);
return NULL;
+ }
off += s;
hdr.length = htons((u_short) off);
hdr.major = 0;
@@ -412,7 +418,7 @@ htcpFreeSpecifier(htcpSpecifier * s)
safe_free(s->uri);
safe_free(s->version);
safe_free(s->req_hdrs);
- xfree(s);
+ memFree(s, MEM_HTCP_SPECIFIER);
}
static void
@@ -421,7 +427,7 @@ htcpFreeDetail(htcpDetail * d)
safe_free(d->resp_hdrs);
safe_free(d->entity_hdrs);
safe_free(d->cache_hdrs);
- xfree(d);
+ memFree(d, MEM_HTCP_DETAIL);
}
static int
@@ -454,7 +460,7 @@ htcpUnpackCountstr(char *buf, int sz, char **str)
static htcpSpecifier *
htcpUnpackSpecifier(char *buf, int sz)
{
- htcpSpecifier *s = xcalloc(1, sizeof(htcpSpecifier));
+ htcpSpecifier *s = memAllocate(MEM_HTCP_SPECIFIER);
int o;
debug(31, 3) ("htcpUnpackSpecifier: %d bytes\n", (int) sz);
o = htcpUnpackCountstr(buf, sz, &s->method);
@@ -496,7 +502,7 @@ htcpUnpackSpecifier(char *buf, int sz)
static htcpDetail *
htcpUnpackDetail(char *buf, int sz)
{
- htcpDetail *d = xcalloc(1, sizeof(htcpDetail));
+ htcpDetail *d = memAllocate(MEM_HTCP_DETAIL);
int o;
debug(31, 3) ("htcpUnpackDetail: %d bytes\n", (int) sz);
o = htcpUnpackCountstr(buf, sz, &d->resp_hdrs);
@@ -541,6 +547,7 @@ htcpTstReply(htcpDataHeader * dhdr, StoreEntry * e, htcpSpecifier * spec, struct
int hops = 0;
int samp = 0;
char cto_buf[128];
+ memset(&stuff, '\0', sizeof(stuff));
stuff.op = HTCP_TST;
stuff.rr = RR_RESPONSE;
stuff.f1 = 0;
@@ -667,6 +674,7 @@ htcpHandleTstResponse(htcpDataHeader * hdr, char *buf, int sz, struct sockaddr_i
key = queried_keys[htcpReply.msg_id % N_QUERIED_KEYS];
debug(31, 3) ("htcpHandleTstResponse: key (%p) %s\n", key, storeKeyText(key));
neighborsHtcpReply(key, &htcpReply, from);
+ httpHeaderClean(&htcpReply.hdr);
if (d)
htcpFreeDetail(d);
}
@@ -847,6 +855,8 @@ htcpInit(void)
} else {
htcpOutSocket = htcpInSocket;
}
+ memDataInit(MEM_HTCP_SPECIFIER, "htcpSpecifier", sizeof(htcpSpecifier), 0);
+ memDataInit(MEM_HTCP_DETAIL, "htcpDetail", sizeof(htcpDetail), 0);
}
void
@@ -879,6 +889,7 @@ htcpQuery(StoreEntry * e, request_t * req, peer * p)
packerClean(&pa);
stuff.S.req_hdrs = mb.buf;
pkt = htcpBuildPacket(&stuff, &pktlen);
+ memBufClean(&mb);
if (pkt == NULL) {
debug(31, 0) ("htcpQuery: htcpBuildPacket() failed\n");
return;
diff --git a/src/http.cc b/src/http.cc
index ca7981eb2e..df354804ae 100644
--- a/src/http.cc
+++ b/src/http.cc
@@ -1,6 +1,6 @@
/*
- * $Id: http.cc,v 1.353 1999/09/29 00:22:14 wessels Exp $
+ * $Id: http.cc,v 1.354 1999/10/04 05:05:15 wessels Exp $
*
* DEBUG: section 11 Hypertext Transfer Protocol (HTTP)
* AUTHOR: Harvest Derived
@@ -291,81 +291,80 @@ httpProcessReplyHeader(HttpStateData * httpState, const char *buf, int size)
char *t = NULL;
StoreEntry *entry = httpState->entry;
int room;
- int hdr_len;
+ size_t hdr_len;
HttpReply *reply = entry->mem_obj->reply;
+ Ctx ctx;
debug(11, 3) ("httpProcessReplyHeader: key '%s'\n",
storeKeyText(entry->key));
if (httpState->reply_hdr == NULL)
httpState->reply_hdr = memAllocate(MEM_8K_BUF);
- if (httpState->reply_hdr_state == 0) {
- hdr_len = strlen(httpState->reply_hdr);
- room = 8191 - hdr_len;
- strncat(httpState->reply_hdr, buf, room < size ? room : size);
- hdr_len += room < size ? room : size;
- if (hdr_len > 4 && strncmp(httpState->reply_hdr, "HTTP/", 5)) {
- debug(11, 3) ("httpProcessReplyHeader: Non-HTTP-compliant header: '%s'\n", httpState->reply_hdr);
- httpState->reply_hdr_state += 2;
- reply->sline.status = HTTP_INVALID_HEADER;
- return;
- }
- t = httpState->reply_hdr + hdr_len;
- /* headers can be incomplete only if object still arriving */
- if (!httpState->eof) {
- size_t k = headersEnd(httpState->reply_hdr, 8192);
- if (0 == k)
- return; /* headers not complete */
- t = httpState->reply_hdr + k;
- }
- *t = '\0';
- httpState->reply_hdr_state++;
- }
- if (httpState->reply_hdr_state == 1) {
- const Ctx ctx = ctx_enter(entry->mem_obj->url);
- httpState->reply_hdr_state++;
- debug(11, 9) ("GOT HTTP REPLY HDR:\n---------\n%s\n----------\n",
- httpState->reply_hdr);
- /* Parse headers into reply structure */
- /* what happens if we fail to parse here? */
- httpReplyParse(reply, httpState->reply_hdr); /* httpState->eof); */
- storeTimestampsSet(entry);
- /* Check if object is cacheable or not based on reply code */
- debug(11, 3) ("httpProcessReplyHeader: HTTP CODE: %d\n", reply->sline.status);
- if (neighbors_do_private_keys)
- httpMaybeRemovePublic(entry, reply->sline.status);
- switch (httpCachableReply(httpState)) {
- case 1:
- httpMakePublic(entry);
- break;
- case 0:
- httpMakePrivate(entry);
- break;
- case -1:
- httpCacheNegatively(entry);
- break;
- default:
- assert(0);
- break;
- }
- if (reply->cache_control) {
- if (EBIT_TEST(reply->cache_control->mask, CC_PROXY_REVALIDATE))
- EBIT_SET(entry->flags, ENTRY_REVALIDATE);
- else if (EBIT_TEST(reply->cache_control->mask, CC_MUST_REVALIDATE))
- EBIT_SET(entry->flags, ENTRY_REVALIDATE);
- }
- if (httpState->flags.keepalive)
- if (httpState->peer)
- httpState->peer->stats.n_keepalives_sent++;
- if (reply->keep_alive)
- if (httpState->peer)
- httpState->peer->stats.n_keepalives_recv++;
- ctx_exit(ctx);
- if (reply->date > -1 && !httpState->peer) {
- int skew = abs(reply->date - squid_curtime);
- if (skew > 86400)
- debug(11, 3) ("%s's clock is skewed by %d seconds!\n",
- httpState->request->host, skew);
- }
+ assert(httpState->reply_hdr_state == 0);
+ hdr_len = strlen(httpState->reply_hdr);
+ room = 8191 - hdr_len;
+ strncat(httpState->reply_hdr, buf, room < size ? room : size);
+ hdr_len += room < size ? room : size;
+ if (hdr_len > 4 && strncmp(httpState->reply_hdr, "HTTP/", 5)) {
+ debug(11, 3) ("httpProcessReplyHeader: Non-HTTP-compliant header: '%s'\n", httpState->reply_hdr);
+ httpState->reply_hdr_state += 2;
+ reply->sline.status = HTTP_INVALID_HEADER;
+ return;
+ }
+ t = httpState->reply_hdr + hdr_len;
+ /* headers can be incomplete only if object still arriving */
+ if (!httpState->eof) {
+ size_t k = headersEnd(httpState->reply_hdr, 8192);
+ if (0 == k)
+ return; /* headers not complete */
+ t = httpState->reply_hdr + k;
+ }
+ *t = '\0';
+ httpState->reply_hdr_state++;
+ assert(httpState->reply_hdr_state == 1);
+ ctx = ctx_enter(entry->mem_obj->url);
+ httpState->reply_hdr_state++;
+ debug(11, 9) ("GOT HTTP REPLY HDR:\n---------\n%s\n----------\n",
+ httpState->reply_hdr);
+ /* Parse headers into reply structure */
+ /* what happens if we fail to parse here? */
+ httpReplyParse(reply, httpState->reply_hdr, hdr_len);
+ storeTimestampsSet(entry);
+ /* Check if object is cacheable or not based on reply code */
+ debug(11, 3) ("httpProcessReplyHeader: HTTP CODE: %d\n", reply->sline.status);
+ if (neighbors_do_private_keys)
+ httpMaybeRemovePublic(entry, reply->sline.status);
+ switch (httpCachableReply(httpState)) {
+ case 1:
+ httpMakePublic(entry);
+ break;
+ case 0:
+ httpMakePrivate(entry);
+ break;
+ case -1:
+ httpCacheNegatively(entry);
+ break;
+ default:
+ assert(0);
+ break;
}
+ if (reply->cache_control) {
+ if (EBIT_TEST(reply->cache_control->mask, CC_PROXY_REVALIDATE))
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE);
+ else if (EBIT_TEST(reply->cache_control->mask, CC_MUST_REVALIDATE))
+ EBIT_SET(entry->flags, ENTRY_REVALIDATE);
+ }
+ if (httpState->flags.keepalive)
+ if (httpState->peer)
+ httpState->peer->stats.n_keepalives_sent++;
+ if (reply->keep_alive)
+ if (httpState->peer)
+ httpState->peer->stats.n_keepalives_recv++;
+ if (reply->date > -1 && !httpState->peer) {
+ int skew = abs(reply->date - squid_curtime);
+ if (skew > 86400)
+ debug(11, 3) ("%s's clock is skewed by %d seconds!\n",
+ httpState->request->host, skew);
+ }
+ ctx_exit(ctx);
}
static int
@@ -616,8 +615,8 @@ httpBuildRequestHeader(request_t * request,
HttpHeaderPos pos = HttpHeaderInitPos;
httpHeaderInit(hdr_out, hoRequest);
/* append our IMS header */
- if (entry && entry->lastmod > -1 && request->method == METHOD_GET)
- httpHeaderPutTime(hdr_out, HDR_IF_MODIFIED_SINCE, entry->lastmod);
+ if (request->lastmod > -1 && request->method == METHOD_GET)
+ httpHeaderPutTime(hdr_out, HDR_IF_MODIFIED_SINCE, request->lastmod);
/* decide if we want to do Ranges ourselves
* (and fetch the whole object now)
@@ -764,7 +763,7 @@ httpBuildRequestHeader(request_t * request,
/* build request prefix and append it to a given MemBuf;
* return the length of the prefix */
-size_t
+mb_size_t
httpBuildRequestPrefix(request_t * request,
request_t * orig_request,
StoreEntry * entry,
@@ -863,6 +862,7 @@ httpStart(FwdState * fwd)
xstrncpy(proxy_req->host, httpState->peer->host, SQUIDHOSTNAMELEN);
proxy_req->port = httpState->peer->http_port;
proxy_req->flags = orig_req->flags;
+ proxy_req->lastmod = orig_req->lastmod;
httpState->request = requestLink(proxy_req);
httpState->orig_request = requestLink(orig_req);
proxy_req->flags.proxying = 1;
diff --git a/src/ipc.cc b/src/ipc.cc
index 3d79039f58..d2f5018c73 100644
--- a/src/ipc.cc
+++ b/src/ipc.cc
@@ -1,6 +1,6 @@
/*
- * $Id: ipc.cc,v 1.15 1998/11/20 06:08:01 wessels Exp $
+ * $Id: ipc.cc,v 1.16 1999/10/04 05:05:16 wessels Exp $
*
* DEBUG: section 54 Interprocess Communication
* AUTHOR: Duane Wessels
@@ -182,15 +182,16 @@ ipcCreate(int type, const char *prog, char *const args[], const char *name, int
}
memset(hello_buf, '\0', HELLO_BUF_SZ);
if (type == IPC_UDP_SOCKET)
- x = recv(prfd, hello_buf, HELLO_BUF_SZ, 0);
+ x = recv(prfd, hello_buf, HELLO_BUF_SZ - 1, 0);
else
- x = read(prfd, hello_buf, HELLO_BUF_SZ);
+ x = read(prfd, hello_buf, HELLO_BUF_SZ - 1);
if (x < 0) {
debug(50, 0) ("ipcCreate: PARENT: hello read test failed\n");
debug(50, 0) ("--> read: %s\n", xstrerror());
return ipcCloseAllFD(prfd, pwfd, crfd, cwfd);
} else if (strcmp(hello_buf, hello_string)) {
debug(54, 0) ("ipcCreate: PARENT: hello read test failed\n");
+ debug(54, 0) ("--> read returned %d\n", x);
debug(54, 0) ("--> got '%s'\n", rfc1738_escape(hello_buf));
return ipcCloseAllFD(prfd, pwfd, crfd, cwfd);
}
diff --git a/src/main.cc b/src/main.cc
index e880a43e5b..8a88f5bfbe 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -1,6 +1,6 @@
/*
- * $Id: main.cc,v 1.304 1999/08/02 06:18:38 wessels Exp $
+ * $Id: main.cc,v 1.305 1999/10/04 05:05:17 wessels Exp $
*
* DEBUG: section 1 Startup and Main Loop
* AUTHOR: Harvest Derived
@@ -72,6 +72,10 @@ extern void log_trace_init(char *);
static EVH SquidShutdown;
static void mainSetCwd(void);
+#if TEST_ACCESS
+#include "test_access.c"
+#endif
+
static void
usage(void)
{
@@ -482,7 +486,6 @@ mainInitialize(void)
unlinkdInit();
urlInitialize();
cachemgrInit();
- eventInit(); /* eventInit() before statInit() */
statInit();
storeInit();
mainSetCwd();
@@ -588,12 +591,21 @@ main(int argc, char **argv)
leakInit();
#endif
memInit(); /* memInit is required for config parsing */
+ eventInit(); /* eventInit() is required for config parsing */
parse_err = parseConfigFile(ConfigFile);
if (opt_parse_cfg_only)
return parse_err;
}
+#if TEST_ACCESS
+ comm_init();
+ comm_select_init();
+ mainInitialize();
+ test_access();
+ return 0;
+#endif
+
/* send signal to running copy and exit */
if (opt_send_signal != -1) {
sendSignal();
diff --git a/src/neighbors.cc b/src/neighbors.cc
index 30307afa9a..dce2842163 100644
--- a/src/neighbors.cc
+++ b/src/neighbors.cc
@@ -1,6 +1,6 @@
/*
- * $Id: neighbors.cc,v 1.275 1999/06/16 22:10:40 wessels Exp $
+ * $Id: neighbors.cc,v 1.276 1999/10/04 05:05:19 wessels Exp $
*
* DEBUG: section 15 Neighbor Routines
* AUTHOR: Harvest Derived
@@ -102,7 +102,7 @@ neighborType(const peer * p, const request_t * request)
{
const struct _domain_type *d = NULL;
for (d = p->typelist; d; d = d->next) {
- if (matchDomainName(d->domain, request->host))
+ if (0 == matchDomainName(d->domain, request->host))
if (d->type != PEER_NONE)
return d->type;
}
@@ -136,7 +136,7 @@ peerAllowedToUse(const peer * p, request_t * request)
return do_ping;
do_ping = 0;
for (d = p->peer_domain; d; d = d->next) {
- if (matchDomainName(d->domain, request->host)) {
+ if (0 == matchDomainName(d->domain, request->host)) {
do_ping = d->do_ping;
break;
}
@@ -1306,7 +1306,7 @@ neighborsHtcpReply(const cache_key * key, htcpReplyData * htcp, const struct soc
neighborCountIgnored(p);
return;
}
- debug(15, 1) ("neighborsHtcpReply: e = %p\n", e);
+ debug(15, 3) ("neighborsHtcpReply: e = %p\n", e);
mem->ping_reply_callback(p, ntype, PROTO_HTCP, htcp, mem->ircb_data);
}
#endif
diff --git a/src/net_db.cc b/src/net_db.cc
index c4135c877f..e6b2b95676 100644
--- a/src/net_db.cc
+++ b/src/net_db.cc
@@ -1,6 +1,6 @@
/*
- * $Id: net_db.cc,v 1.139 1999/05/04 21:58:29 wessels Exp $
+ * $Id: net_db.cc,v 1.140 1999/10/04 05:05:20 wessels Exp $
*
* DEBUG: section 38 Network Measurement Database
* AUTHOR: Duane Wessels
@@ -528,7 +528,7 @@ netdbExchangeHandleReply(void *data, char *buf, ssize_t size)
debug(38, 5) ("netdbExchangeHandleReply: hdr_sz = %d\n", hdr_sz);
rep = ex->e->mem_obj->reply;
if (0 == rep->sline.status)
- httpReplyParse(rep, buf);
+ httpReplyParse(rep, buf, hdr_sz);
debug(38, 3) ("netdbExchangeHandleReply: reply status %d\n",
rep->sline.status);
if (HTTP_OK != rep->sline.status) {
@@ -973,6 +973,8 @@ netdbExchangeStart(void *data)
storeClientCopy(ex->e, ex->seen, ex->used, ex->buf_sz,
ex->buf, netdbExchangeHandleReply, ex);
ex->r->flags.loopdetect = 1; /* cheat! -- force direct */
+ if (p->login)
+ xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
fwdStart(-1, ex->e, ex->r, no_addr, no_addr);
#endif
}
diff --git a/src/peer_digest.cc b/src/peer_digest.cc
index 14204b1cd9..c6ff44891b 100644
--- a/src/peer_digest.cc
+++ b/src/peer_digest.cc
@@ -1,6 +1,6 @@
/*
- * $Id: peer_digest.cc,v 1.70 1999/01/29 21:28:17 wessels Exp $
+ * $Id: peer_digest.cc,v 1.71 1999/10/04 05:05:20 wessels Exp $
*
* DEBUG: section 72 Peer Digest Routines
* AUTHOR: Alex Rousskov
@@ -285,7 +285,8 @@ peerDigestRequest(PeerDigest * pd)
assert(!req->header.len);
httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr);
httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html");
-
+ if (p->login)
+ xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
/* create fetch state structure */
fetch = memAllocate(MEM_DIGEST_FETCH_STATE);
cbdataAdd(fetch, memFree, MEM_DIGEST_FETCH_STATE);
@@ -330,17 +331,18 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
DigestFetchState *fetch = data;
PeerDigest *pd = fetch->pd;
+ size_t hdr_size;
assert(pd && buf);
assert(!fetch->offset);
if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
return;
- if (headersEnd(buf, size)) {
+ if ((hdr_size = headersEnd(buf, size))) {
http_status status;
HttpReply *reply = fetch->entry->mem_obj->reply;
assert(reply);
- httpReplyParse(reply, buf);
+ httpReplyParse(reply, buf, hdr_size);
status = reply->sline.status;
debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %d (%+d)\n",
strBuf(pd->host), status,
@@ -410,7 +412,7 @@ peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
if ((hdr_size = headersEnd(buf, size))) {
assert(fetch->entry->mem_obj->reply);
if (!fetch->entry->mem_obj->reply->sline.status)
- httpReplyParse(fetch->entry->mem_obj->reply, buf);
+ httpReplyParse(fetch->entry->mem_obj->reply, buf, hdr_size);
if (fetch->entry->mem_obj->reply->sline.status != HTTP_OK) {
debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n",
strBuf(fetch->pd->host), fetch->entry->mem_obj->reply->sline.status);
diff --git a/src/peer_select.cc b/src/peer_select.cc
index 6a6386fa5b..18bdc78765 100644
--- a/src/peer_select.cc
+++ b/src/peer_select.cc
@@ -1,6 +1,6 @@
/*
- * $Id: peer_select.cc,v 1.100 1999/05/19 19:57:49 wessels Exp $
+ * $Id: peer_select.cc,v 1.101 1999/10/04 05:05:21 wessels Exp $
*
* DEBUG: section 44 Peer Selection Algorithm
* AUTHOR: Duane Wessels
@@ -52,7 +52,8 @@ const char *hier_strings[] =
"SOURCE_FASTEST",
"ROUNDROBIN_PARENT",
#if USE_CACHE_DIGESTS
- "CACHE_DIGEST_HIT",
+ "CD_PARENT_HIT",
+ "CD_SIBLING_HIT",
#endif
#if USE_CARP
"CARP",
@@ -310,7 +311,10 @@ peerGetSomeNeighbor(ps_state * ps)
}
#if USE_CACHE_DIGESTS
if ((p = neighborsDigestSelect(request, entry))) {
- code = CACHE_DIGEST_HIT;
+ if (neighborType(p, request) == PEER_PARENT)
+ code = CD_PARENT_HIT;
+ else
+ code = CD_SIBLING_HIT;
} else
#endif
#if USE_CARP
@@ -534,7 +538,6 @@ static void
peerHandleHtcpReply(peer * p, peer_t type, htcpReplyData * htcp, void *data)
{
ps_state *psstate = data;
- request_t *request = psstate->request;
debug(44, 3) ("peerHandleIcpReply: %s %s\n",
htcp->hit ? "HIT" : "MISS",
storeUrl(psstate->entry));
diff --git a/src/protos.h b/src/protos.h
index 763a4ca2ac..9106de4967 100644
--- a/src/protos.h
+++ b/src/protos.h
@@ -1,6 +1,6 @@
/*
- * $Id: protos.h,v 1.346 1999/09/29 00:22:16 wessels Exp $
+ * $Id: protos.h,v 1.347 1999/10/04 05:05:22 wessels Exp $
*
*
* SQUID Internet Object Cache http://squid.nlanr.net/Squid/
@@ -129,6 +129,7 @@ extern void clientdbUpdate(struct in_addr, log_type, protocol_t, size_t);
extern int clientdbCutoffDenied(struct in_addr);
extern void clientdbDump(StoreEntry *);
extern void clientdbFreeMemory(void);
+extern int clientdbEstablished(struct in_addr, int);
extern void clientAccessCheck(void *);
extern void clientAccessCheckDone(int, void *);
@@ -283,13 +284,13 @@ extern int httpCachable(method_t);
extern void httpStart(FwdState *);
extern void httpParseReplyHeaders(const char *, http_reply *);
extern void httpProcessReplyHeader(HttpStateData *, const char *, int);
-extern size_t httpBuildRequestPrefix(request_t * request,
+extern mb_size_t httpBuildRequestPrefix(request_t * request,
request_t * orig_request,
StoreEntry * entry,
MemBuf * mb,
int cfd,
http_state_flags);
-extern void httpAnonInitModule();
+extern void httpAnonInitModule(void);
extern int httpAnonHdrAllowed(http_hdr_type hdr_id);
extern int httpAnonHdrDenied(http_hdr_type hdr_id);
extern void httpBuildRequestHeader(request_t *, request_t *, StoreEntry *, HttpHeader *, int, http_state_flags);
@@ -327,9 +328,9 @@ extern void httpBodySet(HttpBody * body, MemBuf * mb);
extern void httpBodyPackInto(const HttpBody * body, Packer * p);
/* Http Cache Control Header Field */
-extern void httpHdrCcInitModule();
-extern void httpHdrCcCleanModule();
-extern HttpHdrCc *httpHdrCcCreate();
+extern void httpHdrCcInitModule(void);
+extern void httpHdrCcCleanModule(void);
+extern HttpHdrCc *httpHdrCcCreate(void);
extern HttpHdrCc *httpHdrCcParseCreate(const String * str);
extern void httpHdrCcDestroy(HttpHdrCc * cc);
extern HttpHdrCc *httpHdrCcDup(const HttpHdrCc * cc);
@@ -349,17 +350,17 @@ extern void httpHdrRangePackInto(const HttpHdrRange * range, Packer * p);
/* iterate through specs */
extern HttpHdrRangeSpec *httpHdrRangeGetSpec(const HttpHdrRange * range, HttpHdrRangePos * pos);
/* adjust specs after the length is known */
-extern int httpHdrRangeCanonize(HttpHdrRange * range, size_t clen);
+extern int httpHdrRangeCanonize(HttpHdrRange *, ssize_t);
/* other */
extern String httpHdrRangeBoundaryStr(clientHttpRequest * http);
extern int httpHdrRangeIsComplex(const HttpHdrRange * range);
extern int httpHdrRangeWillBeComplex(const HttpHdrRange * range);
-extern size_t httpHdrRangeFirstOffset(const HttpHdrRange * range);
-extern size_t httpHdrRangeLowestOffset(const HttpHdrRange * range, size_t size);
+extern ssize_t httpHdrRangeFirstOffset(const HttpHdrRange * range);
+extern ssize_t httpHdrRangeLowestOffset(const HttpHdrRange * range, ssize_t);
/* Http Content Range Header Field */
-extern HttpHdrContRange *httpHdrContRangeCreate();
+extern HttpHdrContRange *httpHdrContRangeCreate(void);
extern HttpHdrContRange *httpHdrContRangeParseCreate(const char *crange_spec);
/* returns true if range is valid; inits HttpHdrContRange */
extern int httpHdrContRangeParseInit(HttpHdrContRange * crange, const char *crange_spec);
@@ -367,7 +368,7 @@ extern void httpHdrContRangeDestroy(HttpHdrContRange * crange);
extern HttpHdrContRange *httpHdrContRangeDup(const HttpHdrContRange * crange);
extern void httpHdrContRangePackInto(const HttpHdrContRange * crange, Packer * p);
/* inits with given spec */
-extern void httpHdrContRangeSet(HttpHdrContRange *, HttpHdrRangeSpec, size_t ent_len);
+extern void httpHdrContRangeSet(HttpHdrContRange *, HttpHdrRangeSpec, ssize_t);
/* Http Header Tools */
extern HttpHeaderFieldInfo *httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs * attrs, int count);
@@ -377,26 +378,25 @@ extern int httpHeaderIdByNameDef(const char *name, int name_len);
extern void httpHeaderMaskInit(HttpHeaderMask * mask, int value);
extern void httpHeaderCalcMask(HttpHeaderMask * mask, const int *enums, int count);
extern int httpHeaderHasConnDir(const HttpHeader * hdr, const char *directive);
-extern void httpHeaderAddContRange(HttpHeader * hdr, HttpHdrRangeSpec spec, size_t ent_len);
+extern void httpHeaderAddContRange(HttpHeader *, HttpHdrRangeSpec, ssize_t);
extern void strListAdd(String * str, const char *item, char del);
extern int strListIsMember(const String * str, const char *item, char del);
extern int strListIsSubstr(const String * list, const char *s, char del);
extern int strListGetItem(const String * str, char del, const char **item, int *ilen, const char **pos);
extern const char *getStringPrefix(const char *str, const char *end);
extern int httpHeaderParseInt(const char *start, int *val);
-extern int httpHeaderParseSize(const char *start, size_t * sz);
+extern int httpHeaderParseSize(const char *start, ssize_t * sz);
extern int httpHeaderReset(HttpHeader * hdr);
#if STDC_HEADERS
extern void httpHeaderPutStrf(HttpHeader * hdr, http_hdr_type id, const char *fmt,...);
#else
-extern void
- httpHeaderPutStrf();
+extern void httpHeaderPutStrf();
#endif
/* Http Header */
-extern void httpHeaderInitModule();
-extern void httpHeaderCleanModule();
+extern void httpHeaderInitModule(void);
+extern void httpHeaderCleanModule(void);
/* init/clean */
extern void httpHeaderInit(HttpHeader * hdr, http_hdr_owner_type owner);
extern void httpHeaderClean(HttpHeader * hdr);
@@ -444,16 +444,16 @@ extern int httpMsgIsPersistent(float http_ver, const HttpHeader * hdr);
extern int httpMsgIsolateHeaders(const char **parse_start, const char **blk_start, const char **blk_end);
/* Http Reply */
-extern void httpReplyInitModule();
+extern void httpReplyInitModule(void);
/* create/destroy */
-extern HttpReply *httpReplyCreate();
+extern HttpReply *httpReplyCreate(void);
extern void httpReplyDestroy(HttpReply * rep);
/* reset: clean, then init */
extern void httpReplyReset(HttpReply * rep);
/* absorb: copy the contents of a new reply to the old one, destroy new one */
extern void httpReplyAbsorb(HttpReply * rep, HttpReply * new_rep);
/* parse returns -1,0,+1 on error,need-more-data,success */
-extern int httpReplyParse(HttpReply * rep, const char *buf); /*, int atEnd); */
+extern int httpReplyParse(HttpReply * rep, const char *buf, ssize_t);
extern void httpReplyPackInto(const HttpReply * rep, Packer * p);
/* ez-routines */
/* mem-pack: returns a ready to use mem buffer with a packed reply */
@@ -1112,6 +1112,7 @@ extern double gb_to_double(const gb_t *);
extern const char *gb_to_str(const gb_t *);
extern void gb_flush(gb_t *); /* internal, do not use this */
extern int stringHasWhitespace(const char *);
+extern int stringHasCntl(const char *);
extern void linklistPush(link_list **, void *);
extern void *linklistShift(link_list **);
extern int xrename(const char *from, const char *to);
diff --git a/src/redirect.cc b/src/redirect.cc
index 0ee0bf37b6..661f5ef8c8 100644
--- a/src/redirect.cc
+++ b/src/redirect.cc
@@ -1,6 +1,6 @@
/*
- * $Id: redirect.cc,v 1.81 1999/06/24 22:08:43 wessels Exp $
+ * $Id: redirect.cc,v 1.82 1999/10/04 05:05:24 wessels Exp $
*
* DEBUG: section 29 Redirector
* AUTHOR: Duane Wessels
@@ -103,6 +103,18 @@ redirectStart(clientHttpRequest * http, RH * handler, void *data)
handler(data, NULL);
return;
}
+ if (Config.accessList.redirector) {
+ aclCheck_t ch;
+ memset(&ch, '\0', sizeof(ch));
+ ch.src_addr = http->conn->peer.sin_addr;
+ ch.my_addr = http->conn->me.sin_addr;
+ ch.request = http->request;
+ if (!aclCheckFast(Config.accessList.redirector, &ch)) {
+ /* denied -- bypass redirector */
+ handler(data, NULL);
+ return;
+ }
+ }
if (Config.onoff.redirector_bypass && redirectors->stats.queue_size) {
/* Skip redirector if there is one request queued */
n_bypassed++;
diff --git a/src/refresh.cc b/src/refresh.cc
index 8ae1e57dc7..2bc5e76389 100644
--- a/src/refresh.cc
+++ b/src/refresh.cc
@@ -1,7 +1,7 @@
/*
- * $Id: refresh.cc,v 1.49 1999/06/10 06:10:34 wessels Exp $
+ * $Id: refresh.cc,v 1.50 1999/10/04 05:05:24 wessels Exp $
*
* DEBUG: section 22 Refresh Calculation
* AUTHOR: Harvest Derived
@@ -386,7 +386,7 @@ refreshStats(StoreEntry * sentry)
}
void
-refreshInit()
+refreshInit(void)
{
memset(refreshCounts, 0, sizeof(refreshCounts));
refreshCounts[rcHTTP].proto = "HTTP";
diff --git a/src/snmp_agent.cc b/src/snmp_agent.cc
index d999b028fb..9aac952b8c 100644
--- a/src/snmp_agent.cc
+++ b/src/snmp_agent.cc
@@ -1,6 +1,6 @@
/*
- * $Id: snmp_agent.cc,v 1.71 1999/06/17 22:20:42 wessels Exp $
+ * $Id: snmp_agent.cc,v 1.72 1999/10/04 05:05:25 wessels Exp $
*
* DEBUG: section 49 SNMP Interface
* AUTHOR: Kostas Anagnostakis
@@ -280,7 +280,11 @@ snmp_prfSysFn(variable_list * Var, snint * ErrP)
break;
case PERF_SYS_CURLRUEXP:
Answer = snmp_var_new_integer(Var->name, Var->name_length,
+#if !HEAP_REPLACEMENT
(snint) (storeExpiredReferenceAge() * 100),
+#else
+ 0,
+#endif
SMI_TIMETICKS);
break;
case PERF_SYS_CURUNLREQ:
diff --git a/src/snmp_core.cc b/src/snmp_core.cc
index a8128370be..544f2fa436 100644
--- a/src/snmp_core.cc
+++ b/src/snmp_core.cc
@@ -1,6 +1,6 @@
/*
- * $Id: snmp_core.cc,v 1.39 1999/06/17 22:20:43 wessels Exp $
+ * $Id: snmp_core.cc,v 1.40 1999/10/04 05:05:26 wessels Exp $
*
* DEBUG: section 49 SNMP support
* AUTHOR: Glenn Chisholm
@@ -483,7 +483,7 @@ snmpHandleUdp(int sock, void *not_used)
/*
* Turn SNMP packet into a PDU, check available ACL's
*/
-void
+static void
snmpDecodePacket(snmp_request_t * rq)
{
struct snmp_pdu *PDU;
@@ -519,7 +519,7 @@ snmpDecodePacket(snmp_request_t * rq)
/*
* Packet OK, ACL Check OK, Create reponse.
*/
-void
+static void
snmpConstructReponse(snmp_request_t * rq)
{
struct snmp_session Session;
@@ -546,7 +546,7 @@ snmpConstructReponse(snmp_request_t * rq)
*
* If configured forward any reponses which are not for this agent.
*/
-struct snmp_pdu *
+static struct snmp_pdu *
snmpAgentResponse(struct snmp_pdu *PDU)
{
struct snmp_pdu *Answer = NULL;
@@ -632,7 +632,7 @@ snmpAgentResponse(struct snmp_pdu *PDU)
return (Answer);
}
-oid_ParseFn *
+static oid_ParseFn *
snmpTreeGet(oid * Current, snint CurrentLen)
{
oid_ParseFn *Fn = NULL;
@@ -659,7 +659,7 @@ snmpTreeGet(oid * Current, snint CurrentLen)
return (Fn);
}
-oid_ParseFn *
+static oid_ParseFn *
snmpTreeNext(oid * Current, snint CurrentLen, oid ** Next, snint * NextLen)
{
oid_ParseFn *Fn = NULL;
@@ -725,7 +725,7 @@ snmpTreeNext(oid * Current, snint CurrentLen, oid ** Next, snint * NextLen)
return (Fn);
}
-oid *
+static oid *
static_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
{
oid *instance = NULL;
@@ -740,7 +740,7 @@ static_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn
return (instance);
}
-oid *
+static oid *
time_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
{
oid *instance = NULL;
@@ -767,7 +767,7 @@ time_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
return (instance);
}
-oid *
+static oid *
peer_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
{
oid *instance = NULL;
@@ -826,7 +826,7 @@ peer_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
return (instance);
}
-oid *
+static oid *
client_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
{
oid *instance = NULL;
@@ -874,7 +874,7 @@ client_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn
/*
* Returns a the sibling object in the tree
*/
-mib_tree_entry *
+static mib_tree_entry *
snmpTreeSiblingEntry(oid entry, snint len, mib_tree_entry * current)
{
mib_tree_entry *next = NULL;
@@ -897,7 +897,7 @@ snmpTreeSiblingEntry(oid entry, snint len, mib_tree_entry * current)
/*
* Returns the requested child object or NULL if it does not exist
*/
-mib_tree_entry *
+static mib_tree_entry *
snmpTreeEntry(oid entry, snint len, mib_tree_entry * current)
{
mib_tree_entry *next = NULL;
@@ -915,7 +915,7 @@ snmpTreeEntry(oid entry, snint len, mib_tree_entry * current)
/*
* Adds a node to the MIB tree structure and adds the appropriate children
*/
-mib_tree_entry *
+static mib_tree_entry *
#if STDC_HEADERS
snmpAddNode(oid * name, int len, oid_ParseFn * parsefunction, instance_Fn * instancefunction, int children,...)
#else
@@ -968,7 +968,7 @@ snmpAddNode(va_alist)
/*
* Returns the list of parameters in an oid
*/
-oid *
+static oid *
#if STDC_HEADERS
snmpCreateOid(int length,...)
#else
@@ -1002,7 +1002,7 @@ snmpCreateOid(va_alist)
/*
* Allocate space for, and copy, an OID. Returns new oid.
*/
-oid *
+static oid *
snmpOidDup(oid * A, snint ALen)
{
oid *Ans = xmalloc(sizeof(oid) * ALen);
diff --git a/src/squid.h b/src/squid.h
index 1c42ae6cd1..c223d6a679 100644
--- a/src/squid.h
+++ b/src/squid.h
@@ -1,6 +1,6 @@
/*
- * $Id: squid.h,v 1.194 1999/09/28 23:48:49 wessels Exp $
+ * $Id: squid.h,v 1.195 1999/10/04 05:05:27 wessels Exp $
*
* AUTHOR: Duane Wessels
*
@@ -51,10 +51,12 @@
#define CHANGE_FD_SETSIZE 0
#endif
-/* Cannot increase FD_SETSIZE on FreeBSD before 2.2.0, causes select(2)
- * to return EINVAL. */
-/* Marian Durkovic */
-/* Peter Wemm */
+/*
+ * Cannot increase FD_SETSIZE on FreeBSD before 2.2.0, causes select(2)
+ * to return EINVAL.
+ * --Marian Durkovic
+ * --Peter Wemm
+ */
#if defined(_SQUID_FREEBSD_)
#include
#if __FreeBSD_version < 220000
@@ -63,6 +65,15 @@
#endif
#endif
+/*
+ * Trying to redefine CHANGE_FD_SETSIZE causes a slew of warnings
+ * on Mac OS X Server.
+ */
+#if defined(_SQUID_APPLE_)
+#undef CHANGE_FD_SETSIZE
+#define CHANGE_FD_SETSIZE 0
+#endif
+
/* Increase FD_SETSIZE if SQUID_MAXFD is bigger */
#if CHANGE_FD_SETSIZE && SQUID_MAXFD > DEFAULT_FD_SETSIZE
#define FD_SETSIZE SQUID_MAXFD
@@ -359,7 +370,14 @@ struct rusage {
#include "globals.h"
#include "util.h"
+
+/*
+ * Mac OS X Server already has radix.h as a standard header, so
+ * this causes conflicts.
+*/
+#ifndef _SQUID_APPLE_
#include "radix.h"
+#endif
#if !HAVE_TEMPNAM
#include "tempnam.h"
diff --git a/src/ssl.cc b/src/ssl.cc
index 12efc2cde6..a90e04424d 100644
--- a/src/ssl.cc
+++ b/src/ssl.cc
@@ -1,6 +1,6 @@
/*
- * $Id: ssl.cc,v 1.99 1999/08/02 06:18:41 wessels Exp $
+ * $Id: ssl.cc,v 1.100 1999/10/04 05:05:28 wessels Exp $
*
* DEBUG: section 26 Secure Sockets Layer Proxy
* AUTHOR: Duane Wessels
@@ -206,8 +206,8 @@ sslReadServer(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslReadServer: FD %d: read failure: %s\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslReadServer: FD %d: read failure: %s\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
} else if (len == 0) {
@@ -281,8 +281,8 @@ sslWriteServer(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslWriteServer: FD %d: write failure: %s.\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslWriteServer: FD %d: write failure: %s.\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
}
@@ -322,8 +322,8 @@ sslWriteClient(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslWriteClient: FD %d: write failure: %s.\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslWriteClient: FD %d: write failure: %s.\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
}
diff --git a/src/stat.cc b/src/stat.cc
index 7e89ae9d68..2916bff99d 100644
--- a/src/stat.cc
+++ b/src/stat.cc
@@ -1,6 +1,6 @@
/*
- * $Id: stat.cc,v 1.320 1999/07/13 14:51:19 wessels Exp $
+ * $Id: stat.cc,v 1.321 1999/10/04 05:05:29 wessels Exp $
*
* DEBUG: section 18 Cache Manager Statistics
* AUTHOR: Harvest Derived
@@ -79,7 +79,7 @@ static OBJH statCountersHistograms;
static OBJH statClientRequests;
#ifdef XMALLOC_STATISTICS
-static void info_get_mallstat(int, int, StoreEntry *);
+static void info_get_mallstat(int, int, void *);
#endif
StatCounters CountHist[N_COUNT_HIST];
@@ -383,8 +383,9 @@ statOpenfdObj(StoreEntry * sentry)
#ifdef XMALLOC_STATISTICS
static void
-info_get_mallstat(int size, int number, StoreEntry * sentry)
+info_get_mallstat(int size, int number, void *data)
{
+ StoreEntry * sentry = data;
if (number > 0)
storeAppendPrintf(sentry, "\t%d = %d\n", size, number);
}
diff --git a/src/store.cc b/src/store.cc
index a84501ffd0..238c5d614b 100644
--- a/src/store.cc
+++ b/src/store.cc
@@ -1,6 +1,6 @@
/*
- * $Id: store.cc,v 1.508 1999/09/29 00:22:18 wessels Exp $
+ * $Id: store.cc,v 1.509 1999/10/04 05:05:31 wessels Exp $
*
* DEBUG: section 20 Storage Manager
* AUTHOR: Harvest Derived
@@ -711,15 +711,15 @@ storeGetMemSpace(int size)
int released = 0;
static time_t last_check = 0;
int pages_needed;
- dlink_node *m;
- dlink_node *prev = NULL;
int locked = 0;
#if !HEAP_REPLACEMENT
dlink_node *head;
+ dlink_node *m;
+ dlink_node *prev = NULL;
#else
heap *heap = inmem_heap;
heap_key age, min_age = 0.0;
- linklist *locked_entries = NULL;
+ link_list *locked_entries = NULL;
#endif
if (squid_curtime == last_check)
return;
@@ -739,7 +739,7 @@ storeGetMemSpace(int size)
locked++;
debug(20, 5) ("storeGetMemSpace: locked key %s\n",
storeKeyText(e->key));
- linklistPush(e, &locked_entries);
+ linklistPush(&locked_entries, e);
continue;
}
released++;
@@ -758,7 +758,7 @@ storeGetMemSpace(int size)
/*
* Reinsert all bumped locked entries back into heap...
*/
- while ((e = linklistPop(&locked_entries)))
+ while ((e = linklistShift(&locked_entries)))
e->mem_obj->node = heap_insert(inmem_heap, e);
#else
head = inmem_list.head;
@@ -800,8 +800,6 @@ storeGetMemSpace(int size)
void
storeMaintainSwapSpace(void *datanotused)
{
- dlink_node *m;
- dlink_node *prev = NULL;
StoreEntry *e = NULL;
int scanned = 0;
int locked = 0;
@@ -810,10 +808,13 @@ storeMaintainSwapSpace(void *datanotused)
int max_remove;
double f;
static time_t last_warn_time = 0;
-#if HEAP_REPLACEMENT
+#if !HEAP_REPLACEMENT
+ dlink_node *m;
+ dlink_node *prev = NULL;
+#else
heap *heap = store_heap;
heap_key age, min_age = 0.0;
- linklist *locked_entries = NULL;
+ link_list *locked_entries = NULL;
#if HEAP_REPLACEMENT_DEBUG
if (!verify_heap_property(store_heap)) {
debug(20, 1) ("Heap property violated!\n");
@@ -835,6 +836,12 @@ storeMaintainSwapSpace(void *datanotused)
f, max_scan, max_remove);
#if HEAP_REPLACEMENT
while (heap_nodes(heap) > 0) {
+ if (store_swap_size < store_swap_low)
+ break;
+ if (expired >= max_remove)
+ break;
+ if (scanned >= max_scan)
+ break;
age = heap_peepminkey(heap);
e = heap_extractmin(heap);
e->node = NULL; /* no longer in the heap */
@@ -850,7 +857,7 @@ storeMaintainSwapSpace(void *datanotused)
*/
debug(20, 4) ("storeMaintainSwapSpace: locked url %s\n",
(e->mem_obj && e->mem_obj->url) ? e->mem_obj->url : storeKeyText(e->key));
- linklistPush(e, &locked_entries);
+ linklistPush(&locked_entries, e);
}
locked++;
continue;
@@ -872,13 +879,9 @@ storeMaintainSwapSpace(void *datanotused)
*/
debug(20, 5) ("storeMaintainSwapSpace: non-expired %s\n",
storeKeyText(e->key));
- linklistAdd(e, &locked_entries);
+ linklistPush(&locked_entries, e);
continue;
}
- if ((store_swap_size < store_swap_low)
- || (expired >= max_remove)
- || (scanned >= max_scan))
- break;
}
/*
* Bump the heap age factor.
@@ -888,7 +891,7 @@ storeMaintainSwapSpace(void *datanotused)
/*
* Reinsert all bumped locked entries back into heap...
*/
- while ((e = linklistPop(&locked_entries)))
+ while ((e = linklistShift(&locked_entries)))
e->node = heap_insert(store_heap, e);
#else
for (m = store_list.tail; m; m = prev) {
@@ -1229,7 +1232,7 @@ storeFreeMemory(void)
hashFreeItems(store_table, destroy_StoreEntry);
hashFreeMemory(store_table);
store_table = NULL;
-#if USE_CACHE_DIGEST
+#if USE_CACHE_DIGESTS
if (store_digest)
cacheDigestDestroy(store_digest);
#endif
@@ -1454,6 +1457,7 @@ storeEntryReset(StoreEntry * e)
mem->inmem_hi = mem->inmem_lo = 0;
httpReplyDestroy(mem->reply);
mem->reply = httpReplyCreate();
+ e->expires = e->lastmod = e->timestamp = -1;
}
#if HEAP_REPLACEMENT
diff --git a/src/store_client.cc b/src/store_client.cc
index 6d58137d8c..64b4dd2cb2 100644
--- a/src/store_client.cc
+++ b/src/store_client.cc
@@ -1,6 +1,6 @@
/*
- * $Id: store_client.cc,v 1.76 1999/09/29 00:10:33 wessels Exp $
+ * $Id: store_client.cc,v 1.77 1999/10/04 05:05:32 wessels Exp $
*
* DEBUG: section 20 Storage Manager Client-Side Interface
* AUTHOR: Duane Wessels
@@ -333,7 +333,7 @@ storeClientReadBody(void *data, const char *buf, ssize_t len)
assert(sc->callback != NULL);
debug(20, 3) ("storeClientReadBody: len %d\n", len);
if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
- httpReplyParse(mem->reply, sc->copy_buf);
+ httpReplyParse(mem->reply, sc->copy_buf, headersEnd(sc->copy_buf, len));
sc->callback = NULL;
callback(sc->callback_data, sc->copy_buf, len);
}
@@ -360,6 +360,13 @@ storeClientReadHeader(void *data, const char *buf, ssize_t len)
return;
}
tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
+ if (swap_hdr_sz > len) {
+ /* oops, bad disk file? */
+ debug(20, 1) ("storeClientReadHeader: header too small\n");
+ sc->callback = NULL;
+ callback(sc->callback_data, sc->copy_buf, -1);
+ return;
+ }
if (tlv_list == NULL) {
debug(20, 1) ("storeClientReadHeader: failed to unpack meta data\n");
sc->callback = NULL;
@@ -387,7 +394,8 @@ storeClientReadHeader(void *data, const char *buf, ssize_t len)
copy_sz);
xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
- httpReplyParse(mem->reply, sc->copy_buf);
+ httpReplyParse(mem->reply, sc->copy_buf,
+ headersEnd(sc->copy_buf, copy_sz));
sc->callback = NULL;
callback(sc->callback_data, sc->copy_buf, copy_sz);
return;
diff --git a/src/store_log.cc b/src/store_log.cc
index 9a2f1c0ea5..4c68edd6b3 100644
--- a/src/store_log.cc
+++ b/src/store_log.cc
@@ -1,6 +1,6 @@
/*
- * $Id: store_log.cc,v 1.8 1999/08/02 06:18:46 wessels Exp $
+ * $Id: store_log.cc,v 1.9 1999/10/04 05:05:34 wessels Exp $
*
* DEBUG: section 20 Storage Manager Logging Functions
* AUTHOR: Duane Wessels
@@ -55,6 +55,8 @@ storeLog(int tag, const StoreEntry * e)
return;
if (mem == NULL)
return;
+ if (EBIT_TEST(e->flags, ENTRY_DONT_LOG))
+ return;
if (mem->log_url == NULL) {
debug(20, 1) ("storeLog: NULL log_url for %s\n", mem->url);
storeMemObjectDump(mem);
@@ -71,7 +73,7 @@ storeLog(int tag, const StoreEntry * e)
(int) reply->date,
(int) reply->last_modified,
(int) reply->expires,
- strBuf(reply->content_type) ? strBuf(reply->content_type) : "unknown",
+ strLen(reply->content_type) ? strBuf(reply->content_type) : "unknown",
reply->content_length,
(int) (mem->inmem_hi - mem->reply->hdr_sz),
RequestMethodStr[mem->method],
diff --git a/src/store_swapin.cc b/src/store_swapin.cc
index b0e3b4f3ed..06aae94e21 100644
--- a/src/store_swapin.cc
+++ b/src/store_swapin.cc
@@ -1,6 +1,6 @@
/*
- * $Id: store_swapin.cc,v 1.20 1999/08/02 06:18:46 wessels Exp $
+ * $Id: store_swapin.cc,v 1.21 1999/10/04 05:05:34 wessels Exp $
*
* DEBUG: section 20 Storage Manager Swapin Functions
* AUTHOR: Duane Wessels
@@ -71,8 +71,14 @@ static void
storeSwapInFileClosed(void *data, int errflag, storeIOState * sio)
{
store_client *sc = data;
+ STCB *callback;
debug(20, 3) ("storeSwapInFileClosed: sio=%p, errflag=%d\n",
sio, errflag);
cbdataUnlock(sio);
sc->swapin_sio = NULL;
+ if ((callback = sc->callback)) {
+ assert(errflag <= 0);
+ sc->callback = NULL;
+ callback(sc->callback_data, sc->copy_buf, errflag);
+ }
}
diff --git a/src/structs.h b/src/structs.h
index 929bf7492d..e4566f6143 100644
--- a/src/structs.h
+++ b/src/structs.h
@@ -1,6 +1,6 @@
/*
- * $Id: structs.h,v 1.306 1999/09/29 00:22:20 wessels Exp $
+ * $Id: structs.h,v 1.307 1999/10/04 05:05:35 wessels Exp $
*
*
* SQUID Internet Object Cache http://squid.nlanr.net/Squid/
@@ -75,6 +75,8 @@ struct _acl_proxy_auth_user {
char *passwd;
int passwd_ok; /* 1 = passwd checked OK */
long expiretime;
+ struct in_addr ipaddr; /* IP addr this user authenticated from */
+ time_t ip_expiretime;
};
struct _acl_deny_info_list {
@@ -277,7 +279,6 @@ struct _SquidConfig {
struct {
char *configFile;
char *agentInfo;
- u_short localPort;
} Snmp;
#endif
#if USE_WCCP
@@ -310,6 +311,7 @@ struct _SquidConfig {
int redirectChildren;
int authenticateChildren;
int authenticateTTL;
+ int authenticateIpTTL;
struct {
char *host;
u_short port;
@@ -396,6 +398,7 @@ struct _SquidConfig {
int prefer_direct;
int strip_query_terms;
int redirector_bypass;
+ int ignore_unknown_nameservers;
} onoff;
acl *aclList;
struct {
@@ -413,6 +416,7 @@ struct _SquidConfig {
#if USE_IDENT
acl_access *identLookup;
#endif
+ acl_access *redirector;
} accessList;
acl_deny_info_list *denyInfoList;
char *proxyAuthRealm;
@@ -625,8 +629,8 @@ struct _HttpHdrCc {
/* http byte-range-spec */
struct _HttpHdrRangeSpec {
- size_t offset;
- size_t length;
+ ssize_t offset;
+ ssize_t length;
};
/* There may be more than one byte range specified in the request.
@@ -640,7 +644,7 @@ struct _HttpHdrRange {
/* http content-range header field */
struct _HttpHdrContRange {
HttpHdrRangeSpec spec;
- size_t elength; /* entity length, not content length */
+ ssize_t elength; /* entity length, not content length */
};
/* some fields can hold either time or etag specs (e.g. If-Range) */
@@ -654,8 +658,8 @@ struct _TimeOrTag {
struct _HttpHdrRangeIter {
HttpHdrRangePos pos;
const HttpHdrRangeSpec *spec; /* current spec at pos */
- size_t debt_size; /* bytes left to send from the current spec */
- size_t prefix_size; /* the size of the incoming HTTP msg prefix */
+ ssize_t debt_size; /* bytes left to send from the current spec */
+ ssize_t prefix_size; /* the size of the incoming HTTP msg prefix */
String boundary; /* boundary for multipart responses */
};
@@ -1437,6 +1441,7 @@ struct _request_t {
HierarchyLogEntry hier;
err_type err_type;
char *peer_login; /* Configured peer login:password */
+ time_t lastmod; /* Used on refreshes */
};
struct _cachemgr_passwd {
@@ -1491,11 +1496,11 @@ struct _ErrorState {
unsigned int flag_cbdata:1;
} flags;
struct {
+ wordlist *server_msg;
char *request;
char *reply;
} ftp;
char *request_hdrs;
- wordlist *ftp_server_msg;
};
/*
@@ -1660,8 +1665,8 @@ struct _storeSwapLogData {
/* object to track per-action memory usage (e.g. #idle objects) */
struct _MemMeter {
- size_t level; /* current level (count or volume) */
- size_t hwater_level; /* high water mark */
+ ssize_t level; /* current level (count or volume) */
+ ssize_t hwater_level; /* high water mark */
time_t hwater_stamp; /* timestamp of last high water mark change */
};
@@ -1699,6 +1704,7 @@ struct _ClientInfo {
int n_req;
int n_denied;
} cutoff;
+ int n_established; /* number of current established connections */
};
struct _CacheDigest {
@@ -1728,6 +1734,7 @@ struct _FwdState {
int n_tries;
struct {
unsigned int dont_retry:1;
+ unsigned int ftp_pasv_failed:1;
} flags;
};
diff --git a/src/tools.cc b/src/tools.cc
index 231dbd4e0a..4fcc9a589d 100644
--- a/src/tools.cc
+++ b/src/tools.cc
@@ -1,6 +1,6 @@
/*
- * $Id: tools.cc,v 1.186 1999/08/02 06:18:49 wessels Exp $
+ * $Id: tools.cc,v 1.187 1999/10/04 05:05:36 wessels Exp $
*
* DEBUG: section 21 Misc Functions
* AUTHOR: Harvest Derived
@@ -305,9 +305,9 @@ fatal_common(const char *message)
#if HAVE_SYSLOG
syslog(LOG_ALERT, "%s", message);
#endif
- fprintf(debug_log, "FATAL: pid %d %s\n", (int) getpid(), message);
- if (opt_debug_stderr && debug_log != stderr)
- fprintf(stderr, "FATAL: pid %d %s\n", (int) getpid(), message);
+ fprintf(debug_log, "FATAL: %s\n", message);
+ if (opt_debug_stderr > 0 && debug_log != stderr)
+ fprintf(stderr, "FATAL: %s\n", message);
fprintf(debug_log, "Squid Cache (Version %s): Terminated abnormally.\n",
version_string);
fflush(debug_log);
@@ -867,7 +867,6 @@ linklistShift(link_list ** L)
return p;
}
-
/*
* Same as rename(2) but complains if something goes wrong;
* the caller is responsible for handing and explaining the
@@ -883,3 +882,16 @@ xrename(const char *from, const char *to)
from, to, xstrerror());
return -1;
}
+
+int
+stringHasCntl(const char *s)
+{
+ unsigned char c;
+ while ((c = (unsigned char) *s++) != '\0') {
+ if (c <= 0x1f)
+ return 1;
+ if (c >= 0x7f && c <= 0x9f)
+ return 1;
+ }
+ return 0;
+}
diff --git a/src/tunnel.cc b/src/tunnel.cc
index 3e7fefe1ac..4477d9af8d 100644
--- a/src/tunnel.cc
+++ b/src/tunnel.cc
@@ -1,6 +1,6 @@
/*
- * $Id: tunnel.cc,v 1.99 1999/08/02 06:18:41 wessels Exp $
+ * $Id: tunnel.cc,v 1.100 1999/10/04 05:05:28 wessels Exp $
*
* DEBUG: section 26 Secure Sockets Layer Proxy
* AUTHOR: Duane Wessels
@@ -206,8 +206,8 @@ sslReadServer(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslReadServer: FD %d: read failure: %s\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslReadServer: FD %d: read failure: %s\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
} else if (len == 0) {
@@ -281,8 +281,8 @@ sslWriteServer(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslWriteServer: FD %d: write failure: %s.\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslWriteServer: FD %d: write failure: %s.\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
}
@@ -322,8 +322,8 @@ sslWriteClient(int fd, void *data)
}
cbdataLock(sslState);
if (len < 0) {
- debug(50, 1) ("sslWriteClient: FD %d: write failure: %s.\n",
- fd, xstrerror());
+ debug(50, ignoreErrno(errno) ? 3 : 1)
+ ("sslWriteClient: FD %d: write failure: %s.\n", fd, xstrerror());
if (!ignoreErrno(errno))
comm_close(fd);
}
diff --git a/src/typedefs.h b/src/typedefs.h
index 65a0d5c44f..83758775ee 100644
--- a/src/typedefs.h
+++ b/src/typedefs.h
@@ -1,6 +1,6 @@
/*
- * $Id: typedefs.h,v 1.97 1999/06/30 06:29:04 wessels Exp $
+ * $Id: typedefs.h,v 1.98 1999/10/04 05:05:37 wessels Exp $
*
*
* SQUID Internet Object Cache http://squid.nlanr.net/Squid/
@@ -246,7 +246,7 @@ typedef unsigned char cache_key;
typedef int Ctx;
/* in case we want to change it later */
-typedef size_t mb_size_t;
+typedef ssize_t mb_size_t;
/* iteration for HttpHdrRange */
typedef int HttpHdrRangePos;
diff --git a/src/unlinkd.cc b/src/unlinkd.cc
index 52bb82b255..acb435bf19 100644
--- a/src/unlinkd.cc
+++ b/src/unlinkd.cc
@@ -1,5 +1,5 @@
/*
- * $Id: unlinkd.cc,v 1.31 1999/07/13 14:51:29 wessels Exp $
+ * $Id: unlinkd.cc,v 1.32 1999/10/04 05:05:38 wessels Exp $
*
* DEBUG: section 12 Unlink Daemon
* AUTHOR: Duane Wessels
@@ -45,15 +45,21 @@ main(int argc, char *argv[])
{
char buf[UNLINK_BUF_LEN];
char *t;
+ int x;
setbuf(stdin, NULL);
+ setbuf(stdout, NULL);
while (fgets(buf, UNLINK_BUF_LEN, stdin)) {
if ((t = strchr(buf, '\n')))
*t = '\0';
#if USE_TRUNCATE
- truncate(buf, 0);
+ x = truncate(buf, 0);
#else
- unlink(buf);
+ x = unlink(buf);
#endif
+ if (x < 0)
+ printf("ERR\n");
+ else
+ printf("OK\n");
}
exit(0);
}
@@ -67,29 +73,74 @@ static int unlinkd_wfd = -1;
static int unlinkd_rfd = -1;
#endif
+#define UNLINKD_QUEUE_LIMIT 20
+
void
unlinkdUnlink(const char *path)
{
#if USE_UNLINKD
- char *buf;
+ char buf[MAXPATHLEN];
int l;
+ int x;
+ static int queuelen = 0;
if (unlinkd_wfd < 0) {
debug_trap("unlinkdUnlink: unlinkd_wfd < 0");
safeunlink(path, 0);
return;
}
- l = strlen(path) + 1;
- buf = xcalloc(1, l + 1);
- strcpy(buf, path);
- strcat(buf, "\n");
- file_write(unlinkd_wfd,
- -1,
- buf,
- l,
- NULL, /* Handler */
- NULL, /* Handler-data */
- xfree);
+ /*
+ * If the queue length is greater than our limit, then
+ * we pause for up to 100ms, hoping that unlinkd
+ * has some feedback for us. Maybe it just needs a slice
+ * of the CPU's time.
+ */
+ if (queuelen >= UNLINKD_QUEUE_LIMIT) {
+ struct timeval to;
+ fd_set R;
+ int x;
+ FD_ZERO(&R);
+ FD_SET(unlinkd_rfd, &R);
+ to.tv_sec = 0;
+ to.tv_usec = 100000;
+ x = select(unlinkd_rfd + 1, &R, NULL, NULL, &to);
+ }
+ /*
+ * If there is at least one outstanding unlink request, then
+ * try to read a response. If there's nothing to read we'll
+ * get an EWOULDBLOCK or whatever. If we get a response, then
+ * decrement the queue size by the number of newlines read.
+ */
+ if (queuelen > 0) {
+ int x;
+ int i;
+ char rbuf[512];
+ x = read(unlinkd_rfd, rbuf, 511);
+ if (x > 0) {
+ rbuf[x] = '\0';
+ for (i = 0; i < x; i++)
+ if ('\n' == rbuf[i])
+ queuelen--;
+ assert(queuelen >= 0);
+ }
+ }
+ l = strlen(path);
+ assert(l < MAXPATHLEN);
+ xstrncpy(buf, path, MAXPATHLEN);
+ buf[l++] = '\n';
+ x = write(unlinkd_wfd, buf, l);
+ if (x < 0) {
+ debug(50, 1) ("unlinkdUnlink: write FD %d failed: %s\n",
+ unlinkd_wfd, xstrerror());
+ safeunlink(path, 0);
+ return;
+ } else if (x != l) {
+ debug(50, 1) ("unlinkdUnlink: FD %d only wrote %d of %d bytes\n",
+ unlinkd_wfd, x, l);
+ safeunlink(path, 0);
+ return;
+ }
Counter.unlink.requests++;
+ queuelen++;
#endif
}
@@ -136,9 +187,15 @@ unlinkdInit(void)
fd_note(unlinkd_rfd, "unlinkd -> squid");
commSetTimeout(unlinkd_rfd, -1, NULL, NULL);
commSetTimeout(unlinkd_wfd, -1, NULL, NULL);
- commSetNonBlocking(unlinkd_wfd);
+ /*
+ * We leave unlinkd_wfd blocking, because we never want to lose an
+ * unlink request, and we don't have code to retry if we get
+ * EWOULDBLOCK.
+ */
commSetNonBlocking(unlinkd_rfd);
debug(12, 1) ("Unlinkd pipe opened on FD %d\n", unlinkd_wfd);
+#else
+ debug(12, 1) ("Unlinkd is disabled\n");
#endif
}
diff --git a/src/url.cc b/src/url.cc
index a998a2028c..e454e7920d 100644
--- a/src/url.cc
+++ b/src/url.cc
@@ -1,6 +1,6 @@
/*
- * $Id: url.cc,v 1.118 1999/08/02 06:18:49 wessels Exp $
+ * $Id: url.cc,v 1.119 1999/10/04 05:05:38 wessels Exp $
*
* DEBUG: section 23 URL Parsing
* AUTHOR: Duane Wessels
@@ -123,6 +123,25 @@ urlInitialize(void)
debug(23, 5) ("urlInitialize: Initializing...\n");
assert(sizeof(ProtocolStr) == (PROTO_MAX + 1) * sizeof(char *));
memset(&null_request_flags, '\0', sizeof(null_request_flags));
+ /*
+ * These test that our matchDomainName() function works the
+ * way we expect it to.
+ */
+ assert(0 == matchDomainName("foo.com", "foo.com"));
+ assert(0 == matchDomainName(".foo.com", "foo.com"));
+ assert(0 == matchDomainName("foo.com", ".foo.com"));
+ assert(0 == matchDomainName(".foo.com", ".foo.com"));
+ assert(0 == matchDomainName("x.foo.com", ".foo.com"));
+ assert(0 != matchDomainName("x.foo.com", "foo.com"));
+ assert(0 != matchDomainName("foo.com", "x.foo.com"));
+ assert(0 != matchDomainName("bar.com", "foo.com"));
+ assert(0 != matchDomainName(".bar.com", "foo.com"));
+ assert(0 != matchDomainName(".bar.com", ".foo.com"));
+ assert(0 != matchDomainName("bar.com", ".foo.com"));
+ assert(0 < matchDomainName("zzz.com", "foo.com"));
+ assert(0 > matchDomainName("aaa.com", "foo.com"));
+ assert(0 == matchDomainName("FOO.com", "foo.COM"));
+ /* more cases? */
}
method_t
@@ -292,7 +311,7 @@ urlParse(method_t method, char *url)
case URI_WHITESPACE_ALLOW:
break;
case URI_WHITESPACE_ENCODE:
- t = rfc1738_escape(urlpath);
+ t = rfc1738_escape_unescaped(urlpath);
xstrncpy(urlpath, t, MAX_URL);
break;
case URI_WHITESPACE_CHOP:
@@ -385,26 +404,83 @@ urlCanonicalClean(const request_t * request)
break;
}
}
- if (stringHasWhitespace(buf))
- xstrncpy(buf, rfc1738_escape(buf), MAX_URL);
+ if (stringHasCntl(buf))
+ xstrncpy(buf, rfc1738_escape_unescaped(buf), MAX_URL);
return buf;
}
+/*
+ * matchDomainName() compares a hostname with a domainname according
+ * to the following rules:
+ *
+ * HOST DOMAIN MATCH?
+ * ------------- ------------- ------
+ * foo.com foo.com YES
+ * .foo.com foo.com YES
+ * x.foo.com foo.com NO
+ * foo.com .foo.com YES
+ * .foo.com .foo.com YES
+ * x.foo.com .foo.com YES
+ *
+ * We strip leading dots on hosts (but not domains!) so that
+ * We strip leading dots on hosts (but not domains!) so that
+ *
+ * Return values:
+ * 0 means the host matches the domain
+ * 1 means the host is greater than the domain
+ * -1 means the host is less than the domain
+ */
+
int
-matchDomainName(const char *domain, const char *host)
+matchDomainName(const char *h, const char *d)
{
- int offset;
- if ((offset = strlen(host) - strlen(domain)) < 0)
- return 0; /* host too short */
- if (strcasecmp(domain, host + offset) != 0)
- return 0; /* no match at all */
- if (*domain == '.')
- return 1;
- if (offset == 0)
- return 1;
- if (*(host + offset - 1) == '.')
- return 1;
- return 0;
+ int dl;
+ int hl;
+ while ('.' == *h)
+ h++;
+ hl = strlen(h);
+ dl = strlen(d);
+ /*
+ * Start at the ends of the two strings and work towards the
+ * beginning.
+ */
+ while (xtolower(h[--hl]) == xtolower(d[--dl])) {
+ if (hl == 0 && dl == 0) {
+ /*
+ * We made it all the way to the beginning of both
+ * strings without finding any difference.
+ */
+ return 0;
+ }
+ if (0 == hl) {
+ /*
+ * The host string is shorter than the domain string.
+ * There is only one case when this can be a match.
+ * If the domain is just one character longer, and if
+ * that character is a leading '.' then we call it a
+ * match.
+ */
+ if (1 == dl && '.' == d[0])
+ return 0;
+ else
+ return -1;
+ }
+ if (0 == dl) {
+ /*
+ * The domain string is shorter than the host string.
+ * This is a match only if the first domain character
+ * is a leading '.'.
+ */
+ if ('.' == d[0])
+ return 0;
+ else
+ return 1;
+ }
+ }
+ /*
+ * We found different characters in the same position (from the end).
+ */
+ return (xtolower(h[hl]) - xtolower(d[dl]));
}
int
@@ -422,7 +498,6 @@ urlCheckRequest(const request_t * r)
switch (r->protocol) {
case PROTO_URN:
case PROTO_HTTP:
- case PROTO_HTTPS:
case PROTO_CACHEOBJ:
rc = 1;
break;
@@ -437,6 +512,13 @@ urlCheckRequest(const request_t * r)
else if (r->method == METHOD_HEAD)
rc = 1;
break;
+ case PROTO_HTTPS:
+ /*
+ * Squid can't originate an SSL connection, so it should
+ * never receive an "https:" URL. It should always be
+ * CONNECT instead.
+ */
+ rc = 0;
default:
break;
}
diff --git a/src/urn.cc b/src/urn.cc
index d44e65e356..98588f7a30 100644
--- a/src/urn.cc
+++ b/src/urn.cc
@@ -1,7 +1,7 @@
/*
*
- * $Id: urn.cc,v 1.55 1999/05/04 21:58:46 wessels Exp $
+ * $Id: urn.cc,v 1.56 1999/10/04 05:05:39 wessels Exp $
*
* DEBUG: section 52 URN Parsing
* AUTHOR: Kostas Anagnostakis
@@ -217,7 +217,7 @@ urnHandleReply(void *data, char *buf, ssize_t size)
}
s = buf + k;
assert(urlres_e->mem_obj->reply);
- httpReplyParse(urlres_e->mem_obj->reply, buf);
+ httpReplyParse(urlres_e->mem_obj->reply, buf, k);
debug(52, 3) ("mem->reply exists, code=%d.\n",
urlres_e->mem_obj->reply->sline.status);
if (urlres_e->mem_obj->reply->sline.status != HTTP_OK) {
diff --git a/src/wccp.cc b/src/wccp.cc
index 57d15a2b27..428b6d0839 100644
--- a/src/wccp.cc
+++ b/src/wccp.cc
@@ -1,6 +1,6 @@
/*
- * $Id: wccp.cc,v 1.9 1999/08/02 06:18:51 wessels Exp $
+ * $Id: wccp.cc,v 1.10 1999/10/04 05:05:40 wessels Exp $
*
* DEBUG: section 80 WCCP Support
* AUTHOR: Glenn Chisholm
@@ -277,8 +277,7 @@ static void
wccpAssignBuckets(void *voidnotused)
{
struct wccp_assign_bucket_t wccp_assign_bucket;
- int number_buckets;
- int loop_buckets;
+ int buckets_per_cache;
int loop;
int number_caches;
int bucket = 0;
@@ -296,12 +295,14 @@ wccpAssignBuckets(void *voidnotused)
number_caches = WCCP_ACTIVE_CACHES;
caches = xmalloc(sizeof(int) * number_caches);
- number_buckets = WCCP_BUCKETS / number_caches;
+ buckets_per_cache = WCCP_BUCKETS / number_caches;
for (loop = 0; loop < number_caches; loop++) {
+ int i;
xmemcpy(&caches[loop],
&wccp_i_see_you.wccp_cache_entry[loop].ip_addr.s_addr,
sizeof(*caches));
- for (loop_buckets = 0; loop_buckets < number_buckets; loop_buckets++) {
+ for (i = 0; i < buckets_per_cache; i++) {
+ assert(bucket < WCCP_BUCKETS);
buckets[bucket++] = loop;
}
}