From: wessels <> Date: Mon, 4 Oct 1999 11:04:00 +0000 (+0000) Subject: 2.3 branch merge X-Git-Tag: SQUID_3_0_PRE1~2116 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9bc73deb181f454f4e5597c67c5b6b40fd4d0f58;p=thirdparty%2Fsquid.git 2.3 branch merge --- diff --git a/ChangeLog b/ChangeLog index 2a862b7075..fd7f410456 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,43 @@ +Changes to Squid-2.3.DEVEL2 (): + + - Added --enable-truncate configure option. + - Updated Czech error messages () + - Updated French error messages () + - Updated Spanish error messages () + - Added xrename() function for better debugging. + - Disallow empty ("") password in aclDecodeProxyAuth() + (BoB Miorelli). + - Fixed ACL SPLAY subdomain detection (again). + - Increased default 'request_body_max_size' from 100KB + to 1MB in cf.data.pre. + - Added 'content_length' member to request_t structure + so we don't have to use httpHdrGetInt() so often. + - Fixed repeatedly calling memDataInit() for every reconfigure. + - Cleaned up the case when fwdDispatch() cannot forward a + request. Error messages used to report "[no URL]". + - Added a check to return specific error messages for a + "store_digest" request when the digest entry doesn't exist + and we reach internalStart(). + - Changed the interface of storeSwapInStart() to avoid a bug + where we closed "sc->swapin_sio" but couldn't set the + pointer to NULL. + - Changed storeDirClean() so that the rate it gets called + depends on the number of objects deleted. + - Some WCCP fixes. + - Added 'hostname_aliases' option to detect internal requests + (cache digests) when a cache has more than one hostname + in use. + - Async I/O NUMTHREADS now configurable with --enable-async-io=N + (Henrik Nordstrom). + - Added queue length to async I/O cachemgr stats (Henrik Nordstrom). + - Added OPTIONS request method. + - WCCP establishes and registers with the router faster. + - Added 'maxconn' acl type to limit the number of established + connections from a single client IP address. Submitted + by Vadim Kolontsov. + - Close FTP data socket as soon as transfer completes + (Alexander V. Lukyanov). + Changes to Squid-2.3.DEVEL1 (): - Added WCCP support. This adds the 'wccp_router' squid.conf @@ -25,7 +65,6 @@ Changes to Squid-2.3.DEVEL1 (): - Added 'peer_connect_timeout' squid.conf option. - Added 'redirector_bypass' squid.conf option. - Added RFC 2518 (WEBDAV) request methods. - Changes to Squid-2.2 (April 19, 1999): @@ -233,6 +272,25 @@ Changes to Squid-2.2 (April 19, 1999): if a cache_dir subdirectory didn't exist. - Fixed a buffer overrun bug in gb_to_str(). + 2.2.STABLE4: + + - Fixed a dread_ctrl leak caused in store_client.c + - Fixed a memory leak in eventRun(). + - Fixed a memory leak of ErrorState structures due to + a bug in forward.c. + - Fixed detection of subdomain collisions for SPLAY trees. + - Fixed logging of hierarchy codes for SSL requests (Henrik + Nordstrom). + - Added some descriptions to mib.txt. + - Fixed a bug with non-hierarchical requests (e.g. POST) + and cache digests. We used to look up non-hierarchical + requests in peer digests. A false hit may cause Squid + to forward a request to a sibling. In combination with + 'Cache-control: only-if-cached, this generates 504 Gateway + Timeout responses and the request may not be re-forwardable. + - Fixed a filedescriptor leak for some aborted requests. + + Changes to Squid-2.1 (November 16, 1998): - Changed delayPoolsUpdate() to be called as an event. 
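For quick reference, the new knobs called out in the Squid-2.3.DEVEL2 entries above combine roughly as follows. This is an illustrative sketch only, not text from the commit: the ./configure option names are taken from the ChangeLog entries, while the exact 'maxconn' acl syntax in squid.conf is an assumption that should be checked against cf.data.pre.

    # build with 24 async I/O worker threads and truncate() instead of unlink()
    ./configure --enable-async-io=24 --enable-truncate

    # squid.conf: refuse more than 10 concurrent connections per client IP
    # using the new 'maxconn' acl type
    acl manyconn maxconn 10
    http_access deny manyconn
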
diff --git a/cfgaux/config.guess b/cfgaux/config.guess index d5da525231..1371b5fecb 100755 --- a/cfgaux/config.guess +++ b/cfgaux/config.guess @@ -555,6 +555,9 @@ EOF news*:NEWS-OS:[56].*:*) echo mips-sony-newsos${UNAME_RELEASE} exit 0 ;; + *:Rhapsody:*:*) + echo `arch`-apple-rhapsody${UNAME_RELEASE} + exit 0;; i?86:OS/2:*:*) echo ${UNAME_MACHINE}-ibm-os2 exit 0 ;; diff --git a/cfgaux/config.sub b/cfgaux/config.sub index f751a9eed8..ac63ffeede 100755 --- a/cfgaux/config.sub +++ b/cfgaux/config.sub @@ -753,6 +753,9 @@ case $os in -xenix) os=-xenix ;; + -rhapsody*) + os=-rhapsody + ;; -none) ;; -os2) diff --git a/configure b/configure index 026173dc57..cb357e7826 100755 --- a/configure +++ b/configure @@ -27,7 +27,11 @@ ac_help="$ac_help ac_help="$ac_help --enable-carp Enable CARP support" ac_help="$ac_help - --enable-async-io Do ASYNC disk I/O using threads" + --enable-async-io[=N_THREADS] + Do ASYNC disk I/O using threads. + N_THREADS is the number of worker threads + defaults to 16. See also src/squid.h for + some additional platform tuning" ac_help="$ac_help --enable-icmp Enable ICMP pinging" ac_help="$ac_help @@ -95,6 +99,13 @@ ac_help="$ac_help performance improvement, but may cause problems when used with async I/O. Truncate uses more filesystem inodes than unlink.." +ac_help="$ac_help + --enable-underscores Squid by default rejects any host names with _ + in their name to conform with internet standars. + If you disagree with this you may allow _ in + hostnames by using this switch, provided that + the resolver library on the host where Squid runs + does not reject _ in hostnames..." # Initialize some variables set by options. # The variables have the same names as the options, with @@ -603,7 +614,7 @@ fi -# From configure.in Revision: 1.176.2.3 +# From configure.in Revision: 1.177 ac_aux_dir= for ac_dir in cfgaux $srcdir/cfgaux; do if test -f $ac_dir/install-sh; then @@ -631,7 +642,7 @@ else { echo "configure: error: can not run $ac_config_sub" 1>&2; exit 1; } fi echo $ac_n "checking host system type""... $ac_c" 1>&6 -echo "configure:635: checking host system type" >&5 +echo "configure:646: checking host system type" >&5 host_alias=$host case "$host_alias" in @@ -699,7 +710,7 @@ PRESET_CFLAGS="$CFLAGS" # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:703: checking for $ac_word" >&5 +echo "configure:714: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -728,7 +739,7 @@ if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:732: checking for $ac_word" >&5 +echo "configure:743: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_prog_CC'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -776,7 +787,7 @@ fi fi echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works""... $ac_c" 1>&6 -echo "configure:780: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5 +echo "configure:791: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) works" >&5 ac_ext=c # CFLAGS is not in ac_cpp because -g, -O, etc. are not valid cpp options. 
@@ -786,11 +797,11 @@ ac_link='${CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS cross_compiling=$ac_cv_prog_cc_cross cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:805: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then ac_cv_prog_cc_works=yes # If we can't run a trivial program, we are probably using a cross compiler. if (./conftest; exit) 2>/dev/null; then @@ -810,12 +821,12 @@ if test $ac_cv_prog_cc_works = no; then { echo "configure: error: installation or configuration problem: C compiler cannot create executables." 1>&2; exit 1; } fi echo $ac_n "checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler""... $ac_c" 1>&6 -echo "configure:814: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5 +echo "configure:825: checking whether the C compiler ($CC $CFLAGS $LDFLAGS) is a cross-compiler" >&5 echo "$ac_t""$ac_cv_prog_cc_cross" 1>&6 cross_compiling=$ac_cv_prog_cc_cross echo $ac_n "checking whether we are using GNU C""... $ac_c" 1>&6 -echo "configure:819: checking whether we are using GNU C" >&5 +echo "configure:830: checking whether we are using GNU C" >&5 if eval "test \"`echo '$''{'ac_cv_prog_gcc'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -824,7 +835,7 @@ else yes; #endif EOF -if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:828: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then +if { ac_try='${CC-cc} -E conftest.c'; { (eval echo configure:839: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then ac_cv_prog_gcc=yes else ac_cv_prog_gcc=no @@ -839,7 +850,7 @@ if test $ac_cv_prog_gcc = yes; then ac_save_CFLAGS="$CFLAGS" CFLAGS= echo $ac_n "checking whether ${CC-cc} accepts -g""... $ac_c" 1>&6 -echo "configure:843: checking whether ${CC-cc} accepts -g" >&5 +echo "configure:854: checking whether ${CC-cc} accepts -g" >&5 if eval "test \"`echo '$''{'ac_cv_prog_cc_g'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1079,7 +1090,26 @@ fi # Check whether --enable-async_io or --disable-async_io was given. if test "${enable_async_io+set}" = set; then enableval="$enable_async_io" - if test "$enableval" = "yes" ; then + case "$enableval" in + yes) + async_io=yes + ;; + no) + async_io='' + ;; + *) + async_io=yes + cat >> confdefs.h <> confdefs.h <<\EOF @@ -1098,13 +1128,10 @@ EOF fi ;; esac - fi - fi - # Check whether --enable-icmp or --disable-icmp was given. if test "${enable_icmp+set}" = set; then enableval="$enable_icmp" @@ -1462,6 +1489,20 @@ EOF fi +# Check whether --enable-underscores or --disable-underscores was given. +if test "${enable_underscores+set}" = set; then + enableval="$enable_underscores" + if test "$enableval" = "yes" ; then + echo "Enabling the use of underscores in host names" + cat >> confdefs.h <<\EOF +#define ALLOW_HOSTNAME_UNDERSCORES 1 +EOF + + fi + +fi + + # Force some compilers to use ANSI features # case "$host" in @@ -1482,7 +1523,7 @@ case "$host" in esac echo $ac_n "checking how to run the C preprocessor""... $ac_c" 1>&6 -echo "configure:1486: checking how to run the C preprocessor" >&5 +echo "configure:1527: checking how to run the C preprocessor" >&5 # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= @@ -1497,13 +1538,13 @@ else # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. 
cat > conftest.$ac_ext < Syntax Error EOF ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out" -{ (eval echo configure:1507: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } +{ (eval echo configure:1548: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } ac_err=`grep -v '^ *+' conftest.out` if test -z "$ac_err"; then : @@ -1514,13 +1555,13 @@ else rm -rf conftest* CPP="${CC-cc} -E -traditional-cpp" cat > conftest.$ac_ext < Syntax Error EOF ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out" -{ (eval echo configure:1524: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } +{ (eval echo configure:1565: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } ac_err=`grep -v '^ *+' conftest.out` if test -z "$ac_err"; then : @@ -1553,7 +1594,7 @@ echo "$ac_t""$CPP" 1>&6 # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # ./install, which can be erroneously created by make from ./install.sh. echo $ac_n "checking for a BSD compatible install""... $ac_c" 1>&6 -echo "configure:1557: checking for a BSD compatible install" >&5 +echo "configure:1598: checking for a BSD compatible install" >&5 if test -z "$INSTALL"; then if eval "test \"`echo '$''{'ac_cv_path_install'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -1605,7 +1646,7 @@ test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1609: checking for $ac_word" >&5 +echo "configure:1650: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_prog_RANLIB'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1632,7 +1673,7 @@ else fi echo $ac_n "checking whether ln -s works""... $ac_c" 1>&6 -echo "configure:1636: checking whether ln -s works" >&5 +echo "configure:1677: checking whether ln -s works" >&5 if eval "test \"`echo '$''{'ac_cv_prog_LN_S'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1655,7 +1696,7 @@ fi # Extract the first word of "sh", so it can be a program name with args. set dummy sh; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1659: checking for $ac_word" >&5 +echo "configure:1700: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_SH'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1687,7 +1728,7 @@ fi # Extract the first word of "false", so it can be a program name with args. set dummy false; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1691: checking for $ac_word" >&5 +echo "configure:1732: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_FALSE'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1719,7 +1760,7 @@ fi # Extract the first word of "true", so it can be a program name with args. set dummy true; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1723: checking for $ac_word" >&5 +echo "configure:1764: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_TRUE'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1751,7 +1792,7 @@ fi # Extract the first word of "rm", so it can be a program name with args. set dummy rm; ac_word=$2 echo $ac_n "checking for $ac_word""... 
$ac_c" 1>&6 -echo "configure:1755: checking for $ac_word" >&5 +echo "configure:1796: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_RM'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1783,7 +1824,7 @@ fi # Extract the first word of "mv", so it can be a program name with args. set dummy mv; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1787: checking for $ac_word" >&5 +echo "configure:1828: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_MV'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1815,7 +1856,7 @@ fi # Extract the first word of "mkdir", so it can be a program name with args. set dummy mkdir; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1819: checking for $ac_word" >&5 +echo "configure:1860: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_MKDIR'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1847,7 +1888,7 @@ fi # Extract the first word of "ln", so it can be a program name with args. set dummy ln; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1851: checking for $ac_word" >&5 +echo "configure:1892: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_LN'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1879,7 +1920,7 @@ fi # Extract the first word of "perl", so it can be a program name with args. set dummy perl; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1883: checking for $ac_word" >&5 +echo "configure:1924: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_PERL'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1911,7 +1952,7 @@ fi # Extract the first word of "makedepend", so it can be a program name with args. set dummy makedepend; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1915: checking for $ac_word" >&5 +echo "configure:1956: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_MAKEDEPEND'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1943,7 +1984,7 @@ fi # Extract the first word of "ar", so it can be a program name with args. set dummy ar; ac_word=$2 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6 -echo "configure:1947: checking for $ac_word" >&5 +echo "configure:1988: checking for $ac_word" >&5 if eval "test \"`echo '$''{'ac_cv_path_AR'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -1993,12 +2034,12 @@ for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h do ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'` echo $ac_n "checking for $ac_hdr that defines DIR""... $ac_c" 1>&6 -echo "configure:1997: checking for $ac_hdr that defines DIR" >&5 +echo "configure:2038: checking for $ac_hdr that defines DIR" >&5 if eval "test \"`echo '$''{'ac_cv_header_dirent_$ac_safe'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include <$ac_hdr> @@ -2006,7 +2047,7 @@ int main() { DIR *dirp = 0; ; return 0; } EOF -if { (eval echo configure:2010: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2051: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* eval "ac_cv_header_dirent_$ac_safe=yes" else @@ -2031,7 +2072,7 @@ done # Two versions of opendir et al. are in -ldir and -lx on SCO Xenix. if test $ac_header_dirent = dirent.h; then echo $ac_n "checking for opendir in -ldir""... 
$ac_c" 1>&6 -echo "configure:2035: checking for opendir in -ldir" >&5 +echo "configure:2076: checking for opendir in -ldir" >&5 ac_lib_var=`echo dir'_'opendir | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -2039,7 +2080,7 @@ else ac_save_LIBS="$LIBS" LIBS="-ldir $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:2095: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -2072,7 +2113,7 @@ fi else echo $ac_n "checking for opendir in -lx""... $ac_c" 1>&6 -echo "configure:2076: checking for opendir in -lx" >&5 +echo "configure:2117: checking for opendir in -lx" >&5 ac_lib_var=`echo x'_'opendir | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -2080,7 +2121,7 @@ else ac_save_LIBS="$LIBS" LIBS="-lx $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:2136: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -2114,12 +2155,12 @@ fi fi echo $ac_n "checking for ANSI C header files""... $ac_c" 1>&6 -echo "configure:2118: checking for ANSI C header files" >&5 +echo "configure:2159: checking for ANSI C header files" >&5 if eval "test \"`echo '$''{'ac_cv_header_stdc'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include @@ -2127,7 +2168,7 @@ else #include EOF ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out" -{ (eval echo configure:2131: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } +{ (eval echo configure:2172: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } ac_err=`grep -v '^ *+' conftest.out` if test -z "$ac_err"; then rm -rf conftest* @@ -2144,7 +2185,7 @@ rm -f conftest* if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat > conftest.$ac_ext < EOF @@ -2162,7 +2203,7 @@ fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat > conftest.$ac_ext < EOF @@ -2183,7 +2224,7 @@ if test "$cross_compiling" = yes; then : else cat > conftest.$ac_ext < #define ISLOWER(c) ('a' <= (c) && (c) <= 'z') @@ -2194,7 +2235,7 @@ if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) exit(2); exit (0); } EOF -if { (eval echo configure:2198: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:2239: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then : else @@ -2281,17 +2322,17 @@ for ac_hdr in \ do ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'` echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6 -echo "configure:2285: checking for $ac_hdr" >&5 +echo "configure:2326: checking for $ac_hdr" >&5 if eval "test \"`echo '$''{'ac_cv_header_$ac_safe'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < EOF ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out" -{ (eval echo configure:2295: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } +{ (eval echo configure:2336: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; } ac_err=`grep -v '^ *+' conftest.out` if test -z "$ac_err"; then rm -rf conftest* @@ -2319,12 +2360,12 @@ done echo $ac_n "checking for working const""... 
$ac_c" 1>&6 -echo "configure:2323: checking for working const" >&5 +echo "configure:2364: checking for working const" >&5 if eval "test \"`echo '$''{'ac_cv_c_const'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2418: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_c_const=yes else @@ -2394,14 +2435,14 @@ EOF fi echo $ac_n "checking whether byte ordering is bigendian""... $ac_c" 1>&6 -echo "configure:2398: checking whether byte ordering is bigendian" >&5 +echo "configure:2439: checking whether byte ordering is bigendian" >&5 if eval "test \"`echo '$''{'ac_cv_c_bigendian'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else ac_cv_c_bigendian=unknown # See if sys/param.h defines the BYTE_ORDER macro. cat > conftest.$ac_ext < #include @@ -2412,11 +2453,11 @@ int main() { #endif ; return 0; } EOF -if { (eval echo configure:2416: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2457: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* # It does; now see whether it defined to BIG_ENDIAN or not. cat > conftest.$ac_ext < #include @@ -2427,7 +2468,7 @@ int main() { #endif ; return 0; } EOF -if { (eval echo configure:2431: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2472: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_c_bigendian=yes else @@ -2447,7 +2488,7 @@ if test "$cross_compiling" = yes; then { echo "configure: error: can not run test program while cross compiling" 1>&2; exit 1; } else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:2505: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_c_bigendian=no else @@ -2485,20 +2526,20 @@ fi echo $ac_n "checking if ANSI prototypes work""... $ac_c" 1>&6 -echo "configure:2489: checking if ANSI prototypes work" >&5 +echo "configure:2530: checking if ANSI prototypes work" >&5 if eval "test \"`echo '$''{'ac_cv_have_ansi_prototypes'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2543: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_ansi_prototypes="yes" else @@ -2520,13 +2561,13 @@ EOF fi echo $ac_n "checking for tm->tm_gmtoff""... $ac_c" 1>&6 -echo "configure:2524: checking for tm->tm_gmtoff" >&5 +echo "configure:2565: checking for tm->tm_gmtoff" >&5 if eval "test \"`echo '$''{'ac_cv_have_tm_gmoff'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include @@ -2535,7 +2576,7 @@ struct tm foo; foo.tm_gmtoff = 0; ; return 0; } EOF -if { (eval echo configure:2539: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2580: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_tm_gmoff="yes" else @@ -2557,13 +2598,13 @@ EOF fi echo $ac_n "checking for struct mallinfo""... 
$ac_c" 1>&6 -echo "configure:2561: checking for struct mallinfo" >&5 +echo "configure:2602: checking for struct mallinfo" >&5 if eval "test \"`echo '$''{'ac_cv_have_struct_mallinfo'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if HAVE_MALLOC_H @@ -2581,7 +2622,7 @@ struct mallinfo foo; foo.keepcost = 0; ; return 0; } EOF -if { (eval echo configure:2585: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2626: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_struct_mallinfo="yes" else @@ -2603,13 +2644,13 @@ EOF fi echo $ac_n "checking for extended mallinfo""... $ac_c" 1>&6 -echo "configure:2607: checking for extended mallinfo" >&5 +echo "configure:2648: checking for extended mallinfo" >&5 if eval "test \"`echo '$''{'ac_cv_have_ext_mallinfo'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include @@ -2618,7 +2659,7 @@ struct mallinfo foo; foo.mxfast = 0; ; return 0; } EOF -if { (eval echo configure:2622: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2663: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_ext_mallinfo="yes" else @@ -2640,13 +2681,13 @@ EOF fi echo $ac_n "checking for struct rusage""... $ac_c" 1>&6 -echo "configure:2644: checking for struct rusage" >&5 +echo "configure:2685: checking for struct rusage" >&5 if eval "test \"`echo '$''{'ac_cv_have_struct_rusage'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2704: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_struct_rusage="yes" else @@ -2681,13 +2722,13 @@ EOF fi echo $ac_n "checking for ip->ip_hl""... $ac_c" 1>&6 -echo "configure:2685: checking for ip->ip_hl" >&5 +echo "configure:2726: checking for ip->ip_hl" >&5 if eval "test \"`echo '$''{'ac_cv_have_ip_hl'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include @@ -2704,7 +2745,7 @@ struct iphdr ip; ip.ip_hl= 0; ; return 0; } EOF -if { (eval echo configure:2708: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:2749: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_ip_hl="yes" else @@ -2726,7 +2767,7 @@ EOF fi echo $ac_n "checking size of int""... $ac_c" 1>&6 -echo "configure:2730: checking size of int" >&5 +echo "configure:2771: checking size of int" >&5 if eval "test \"`echo '$''{'ac_cv_sizeof_int'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -2734,7 +2775,7 @@ else { echo "configure: error: can not run test program while cross compiling" 1>&2; exit 1; } else cat > conftest.$ac_ext < main() @@ -2745,7 +2786,7 @@ main() exit(0); } EOF -if { (eval echo configure:2749: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:2790: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_sizeof_int=`cat conftestval` else @@ -2765,7 +2806,7 @@ EOF echo $ac_n "checking size of long""... 
$ac_c" 1>&6 -echo "configure:2769: checking size of long" >&5 +echo "configure:2810: checking size of long" >&5 if eval "test \"`echo '$''{'ac_cv_sizeof_long'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -2773,7 +2814,7 @@ else { echo "configure: error: can not run test program while cross compiling" 1>&2; exit 1; } else cat > conftest.$ac_ext < main() @@ -2784,7 +2825,7 @@ main() exit(0); } EOF -if { (eval echo configure:2788: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:2829: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_sizeof_long=`cat conftestval` else @@ -2804,7 +2845,7 @@ EOF echo $ac_n "checking size of void *""... $ac_c" 1>&6 -echo "configure:2808: checking size of void *" >&5 +echo "configure:2849: checking size of void *" >&5 if eval "test \"`echo '$''{'ac_cv_sizeof_void_p'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -2812,7 +2853,7 @@ else { echo "configure: error: can not run test program while cross compiling" 1>&2; exit 1; } else cat > conftest.$ac_ext < main() @@ -2823,7 +2864,7 @@ main() exit(0); } EOF -if { (eval echo configure:2827: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:2868: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_sizeof_void_p=`cat conftestval` else @@ -2846,19 +2887,19 @@ EOF # The Ultrix 4.2 mips builtin alloca declared by alloca.h only works # for constant arguments. Useless! echo $ac_n "checking for working alloca.h""... $ac_c" 1>&6 -echo "configure:2850: checking for working alloca.h" >&5 +echo "configure:2891: checking for working alloca.h" >&5 if eval "test \"`echo '$''{'ac_cv_header_alloca_h'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < int main() { char *p = alloca(2 * sizeof(int)); ; return 0; } EOF -if { (eval echo configure:2862: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:2903: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* ac_cv_header_alloca_h=yes else @@ -2879,12 +2920,12 @@ EOF fi echo $ac_n "checking for alloca""... $ac_c" 1>&6 -echo "configure:2883: checking for alloca" >&5 +echo "configure:2924: checking for alloca" >&5 if eval "test \"`echo '$''{'ac_cv_func_alloca_works'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:2952: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* ac_cv_func_alloca_works=yes else @@ -2939,12 +2980,12 @@ EOF echo $ac_n "checking whether alloca needs Cray hooks""... $ac_c" 1>&6 -echo "configure:2943: checking whether alloca needs Cray hooks" >&5 +echo "configure:2984: checking whether alloca needs Cray hooks" >&5 if eval "test \"`echo '$''{'ac_cv_os_cray'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&6 if test $ac_cv_os_cray = yes; then for ac_func in _getb67 GETB67 getb67; do echo $ac_n "checking for $ac_func""... 
$ac_c" 1>&6 -echo "configure:2973: checking for $ac_func" >&5 +echo "configure:3014: checking for $ac_func" >&5 if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3042: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_func_$ac_func=yes" else @@ -3024,7 +3065,7 @@ done fi echo $ac_n "checking stack direction for C alloca""... $ac_c" 1>&6 -echo "configure:3028: checking stack direction for C alloca" >&5 +echo "configure:3069: checking stack direction for C alloca" >&5 if eval "test \"`echo '$''{'ac_cv_c_stack_direction'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -3032,7 +3073,7 @@ else ac_cv_c_stack_direction=0 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:3096: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_c_stack_direction=1 else @@ -3074,12 +3115,12 @@ fi echo $ac_n "checking for pid_t""... $ac_c" 1>&6 -echo "configure:3078: checking for pid_t" >&5 +echo "configure:3119: checking for pid_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_pid_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3107,12 +3148,12 @@ EOF fi echo $ac_n "checking for size_t""... $ac_c" 1>&6 -echo "configure:3111: checking for size_t" >&5 +echo "configure:3152: checking for size_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_size_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3140,12 +3181,12 @@ EOF fi echo $ac_n "checking for ssize_t""... $ac_c" 1>&6 -echo "configure:3144: checking for ssize_t" >&5 +echo "configure:3185: checking for ssize_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_ssize_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3173,12 +3214,12 @@ EOF fi echo $ac_n "checking for off_t""... $ac_c" 1>&6 -echo "configure:3177: checking for off_t" >&5 +echo "configure:3218: checking for off_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_off_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3206,12 +3247,12 @@ EOF fi echo $ac_n "checking for mode_t""... $ac_c" 1>&6 -echo "configure:3210: checking for mode_t" >&5 +echo "configure:3251: checking for mode_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_mode_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3239,12 +3280,12 @@ EOF fi echo $ac_n "checking for fd_mask""... $ac_c" 1>&6 -echo "configure:3243: checking for fd_mask" >&5 +echo "configure:3284: checking for fd_mask" >&5 if eval "test \"`echo '$''{'ac_cv_type_fd_mask'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #if STDC_HEADERS @@ -3273,13 +3314,13 @@ fi echo $ac_n "checking for socklen_t""... $ac_c" 1>&6 -echo "configure:3277: checking for socklen_t" >&5 +echo "configure:3318: checking for socklen_t" >&5 if eval "test \"`echo '$''{'ac_cv_type_socklen_t'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < #include @@ -3310,7 +3351,7 @@ EOF fi echo $ac_n "checking for main in -lnsl""... 
$ac_c" 1>&6 -echo "configure:3314: checking for main in -lnsl" >&5 +echo "configure:3355: checking for main in -lnsl" >&5 ac_lib_var=`echo nsl'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3318,14 +3359,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lnsl $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3370: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3353,7 +3394,7 @@ else fi echo $ac_n "checking for main in -lsocket""... $ac_c" 1>&6 -echo "configure:3357: checking for main in -lsocket" >&5 +echo "configure:3398: checking for main in -lsocket" >&5 ac_lib_var=`echo socket'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3361,14 +3402,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lsocket $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3413: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3400,7 +3441,7 @@ if test "x$ac_cv_enabled_dlmalloc" = "xyes" ; then echo "skipping libmalloc check (--enable-dlmalloc specified)" else echo $ac_n "checking for main in -lgnumalloc""... $ac_c" 1>&6 -echo "configure:3404: checking for main in -lgnumalloc" >&5 +echo "configure:3445: checking for main in -lgnumalloc" >&5 ac_lib_var=`echo gnumalloc'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3408,14 +3449,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lgnumalloc $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3460: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3458,7 +3499,7 @@ fi *) echo $ac_n "checking for main in -lmalloc""... $ac_c" 1>&6 -echo "configure:3462: checking for main in -lmalloc" >&5 +echo "configure:3503: checking for main in -lmalloc" >&5 ac_lib_var=`echo malloc'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3466,14 +3507,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lmalloc $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3518: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3506,7 +3547,7 @@ fi fi echo $ac_n "checking for main in -lbsd""... $ac_c" 1>&6 -echo "configure:3510: checking for main in -lbsd" >&5 +echo "configure:3551: checking for main in -lbsd" >&5 ac_lib_var=`echo bsd'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3514,14 +3555,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lbsd $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3566: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3549,7 +3590,7 @@ else fi echo $ac_n "checking for main in -lregex""... 
$ac_c" 1>&6 -echo "configure:3553: checking for main in -lregex" >&5 +echo "configure:3594: checking for main in -lregex" >&5 ac_lib_var=`echo regex'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3557,14 +3598,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lregex $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3609: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3585,7 +3626,7 @@ else fi echo $ac_n "checking for gethostbyname in -lbind""... $ac_c" 1>&6 -echo "configure:3589: checking for gethostbyname in -lbind" >&5 +echo "configure:3630: checking for gethostbyname in -lbind" >&5 ac_lib_var=`echo bind'_'gethostbyname | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3593,7 +3634,7 @@ else ac_save_LIBS="$LIBS" LIBS="-lbind $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3649: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3638,7 +3679,7 @@ if test $ac_cv_lib_bind_gethostbyname = "no" ; then ;; *) echo $ac_n "checking for inet_aton in -lresolv""... $ac_c" 1>&6 -echo "configure:3642: checking for inet_aton in -lresolv" >&5 +echo "configure:3683: checking for inet_aton in -lresolv" >&5 ac_lib_var=`echo resolv'_'inet_aton | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3646,7 +3687,7 @@ else ac_save_LIBS="$LIBS" LIBS="-lresolv $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3702: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3673,7 +3714,7 @@ fi if eval "test \"`echo '$ac_cv_lib_'$ac_lib_var`\" = yes"; then echo "$ac_t""yes" 1>&6 echo $ac_n "checking for inet_aton in -l44bsd""... $ac_c" 1>&6 -echo "configure:3677: checking for inet_aton in -l44bsd" >&5 +echo "configure:3718: checking for inet_aton in -l44bsd" >&5 ac_lib_var=`echo 44bsd'_'inet_aton | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3681,7 +3722,7 @@ else ac_save_LIBS="$LIBS" LIBS="-l44bsd $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3737: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3724,7 +3765,7 @@ else fi echo $ac_n "checking for main in -lresolv""... $ac_c" 1>&6 -echo "configure:3728: checking for main in -lresolv" >&5 +echo "configure:3769: checking for main in -lresolv" >&5 ac_lib_var=`echo resolv'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3732,14 +3773,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lresolv $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3784: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3770,7 +3811,7 @@ fi esac fi echo $ac_n "checking for main in -lm""... 
$ac_c" 1>&6 -echo "configure:3774: checking for main in -lm" >&5 +echo "configure:3815: checking for main in -lm" >&5 ac_lib_var=`echo m'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3778,14 +3819,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lm $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3830: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3814,7 +3855,7 @@ fi echo $ac_n "checking for crypt in -lcrypt""... $ac_c" 1>&6 -echo "configure:3818: checking for crypt in -lcrypt" >&5 +echo "configure:3859: checking for crypt in -lcrypt" >&5 ac_lib_var=`echo crypt'_'crypt | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3822,7 +3863,7 @@ else ac_save_LIBS="$LIBS" LIBS="-lcrypt $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3878: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3866,7 +3907,7 @@ fi echo $ac_n "checking for main in -lpthread""... $ac_c" 1>&6 -echo "configure:3870: checking for main in -lpthread" >&5 +echo "configure:3911: checking for main in -lpthread" >&5 ac_lib_var=`echo pthread'_'main | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3874,14 +3915,14 @@ else ac_save_LIBS="$LIBS" LIBS="-lpthread $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3926: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -3916,7 +3957,7 @@ fi case "$host" in *-pc-sco3.2*) echo $ac_n "checking for strftime in -lintl""... $ac_c" 1>&6 -echo "configure:3920: checking for strftime in -lintl" >&5 +echo "configure:3961: checking for strftime in -lintl" >&5 ac_lib_var=`echo intl'_'strftime | sed 'y%./+-%__p_%'` if eval "test \"`echo '$''{'ac_cv_lib_$ac_lib_var'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 @@ -3924,7 +3965,7 @@ else ac_save_LIBS="$LIBS" LIBS="-lintl $LIBS" cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:3980: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_lib_$ac_lib_var=yes" else @@ -4097,12 +4138,12 @@ for ac_func in \ do echo $ac_n "checking for $ac_func""... $ac_c" 1>&6 -echo "configure:4101: checking for $ac_func" >&5 +echo "configure:4142: checking for $ac_func" >&5 if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:4170: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_func_$ac_func=yes" else @@ -4158,12 +4199,12 @@ if test "$async_io" = "yes" ; then do echo $ac_n "checking for $ac_func""... 
$ac_c" 1>&6 -echo "configure:4162: checking for $ac_func" >&5 +echo "configure:4203: checking for $ac_func" >&5 if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:4231: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_func_$ac_func=yes" else @@ -4213,7 +4254,7 @@ done fi echo $ac_n "checking if setresuid is implemented""... $ac_c" 1>&6 -echo "configure:4217: checking if setresuid is implemented" >&5 +echo "configure:4258: checking if setresuid is implemented" >&5 if eval "test \"`echo '$''{'ac_cv_func_setresuid'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else @@ -4221,7 +4262,7 @@ else { echo "configure: error: can not run test program while cross compiling" 1>&2; exit 1; } else cat > conftest.$ac_ext < @@ -4234,7 +4275,7 @@ else } EOF -if { (eval echo configure:4238: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4279: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then ac_cv_func_setresuid="yes" else @@ -4259,7 +4300,7 @@ fi if test "$IPF_TRANSPARENT" ; then echo $ac_n "checking if IP-Filter header files are installed""... $ac_c" 1>&6 -echo "configure:4263: checking if IP-Filter header files are installed" >&5 +echo "configure:4304: checking if IP-Filter header files are installed" >&5 if test "$ac_cv_header_ip_compat_h" = "yes" && test "$ac_cv_header_ip_fil_h" = "yes" && test "$ac_cv_header_ip_nat_h" = "yes" ; then @@ -4302,13 +4343,13 @@ if test -z "$USE_GNUREGEX" ; then esac fi echo $ac_n "checking if GNUregex needs to be compiled""... $ac_c" 1>&6 -echo "configure:4306: checking if GNUregex needs to be compiled" >&5 +echo "configure:4347: checking if GNUregex needs to be compiled" >&5 if test -z "$USE_GNUREGEX"; then if test "$ac_cv_func_regcomp" = "no" || test "$USE_GNUREGEX" = "yes" ; then USE_GNUREGEX="yes" else cat > conftest.$ac_ext < #include @@ -4316,7 +4357,7 @@ int main() { regex_t t; regcomp(&t,"",0); ; return 0; } EOF -if { (eval echo configure:4320: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:4361: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* USE_GNUREGEX="no" else @@ -4347,12 +4388,12 @@ for ac_func in \ do echo $ac_n "checking for $ac_func""... $ac_c" 1>&6 -echo "configure:4351: checking for $ac_func" >&5 +echo "configure:4392: checking for $ac_func" >&5 if eval "test \"`echo '$''{'ac_cv_func_$ac_func'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:4420: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* eval "ac_cv_func_$ac_func=yes" else @@ -4403,12 +4444,12 @@ done echo $ac_n "checking Default FD_SETSIZE value""... 
$ac_c" 1>&6 -echo "configure:4407: checking Default FD_SETSIZE value" >&5 +echo "configure:4448: checking Default FD_SETSIZE value" >&5 if test "$cross_compiling" = yes; then DEFAULT_FD_SETSIZE=256 else cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4478: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then DEFAULT_FD_SETSIZE=`cat conftestval` else @@ -4452,7 +4493,7 @@ EOF echo $ac_n "checking Maximum number of filedescriptors we can open""... $ac_c" 1>&6 -echo "configure:4456: checking Maximum number of filedescriptors we can open" >&5 +echo "configure:4497: checking Maximum number of filedescriptors we can open" >&5 TLDFLAGS="$LDFLAGS" case $host in i386-unknown-freebsd*) @@ -4464,7 +4505,7 @@ if test "$cross_compiling" = yes; then SQUID_MAXFD=256 else cat > conftest.$ac_ext < @@ -4521,7 +4562,7 @@ main() { } EOF -if { (eval echo configure:4525: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4566: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then SQUID_MAXFD=`cat conftestval` else @@ -4548,12 +4589,12 @@ fi LDFLAGS="$TLDFLAGS" echo $ac_n "checking Default UDP send buffer size""... $ac_c" 1>&6 -echo "configure:4552: checking Default UDP send buffer size" >&5 +echo "configure:4593: checking Default UDP send buffer size" >&5 if test "$cross_compiling" = yes; then SQUID_UDP_SO_SNDBUF=16384 else cat > conftest.$ac_ext < @@ -4574,7 +4615,7 @@ main () } EOF -if { (eval echo configure:4578: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4619: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then SQUID_UDP_SO_SNDBUF=`cat conftestval` else @@ -4593,12 +4634,12 @@ EOF echo $ac_n "checking Default UDP receive buffer size""... $ac_c" 1>&6 -echo "configure:4597: checking Default UDP receive buffer size" >&5 +echo "configure:4638: checking Default UDP receive buffer size" >&5 if test "$cross_compiling" = yes; then SQUID_UDP_SO_RCVBUF=16384 else cat > conftest.$ac_ext < @@ -4619,7 +4660,7 @@ main () } EOF -if { (eval echo configure:4623: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4664: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then SQUID_UDP_SO_RCVBUF=`cat conftestval` else @@ -4638,12 +4679,12 @@ EOF echo $ac_n "checking Default TCP send buffer size""... $ac_c" 1>&6 -echo "configure:4642: checking Default TCP send buffer size" >&5 +echo "configure:4683: checking Default TCP send buffer size" >&5 if test "$cross_compiling" = yes; then SQUID_TCP_SO_SNDBUF=16384 else cat > conftest.$ac_ext < @@ -4664,7 +4705,7 @@ main () } EOF -if { (eval echo configure:4668: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4709: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then SQUID_TCP_SO_SNDBUF=`cat conftestval` else @@ -4683,12 +4724,12 @@ EOF echo $ac_n "checking Default TCP receive buffer size""... 
$ac_c" 1>&6 -echo "configure:4687: checking Default TCP receive buffer size" >&5 +echo "configure:4728: checking Default TCP receive buffer size" >&5 if test "$cross_compiling" = yes; then SQUID_TCP_SO_RCVBUF=16384 else cat > conftest.$ac_ext < @@ -4709,7 +4750,7 @@ main () } EOF -if { (eval echo configure:4713: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4754: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then SQUID_TCP_SO_RCVBUF=`cat conftestval` else @@ -4728,19 +4769,19 @@ EOF echo $ac_n "checking if sys_errlist is already defined""... $ac_c" 1>&6 -echo "configure:4732: checking if sys_errlist is already defined" >&5 +echo "configure:4773: checking if sys_errlist is already defined" >&5 if eval "test \"`echo '$''{'ac_cv_needs_sys_errlist'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext < int main() { -char *s = sys_errlist0; +char *s = sys_errlist; ; return 0; } EOF -if { (eval echo configure:4744: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:4785: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_needs_sys_errlist="no" else @@ -4762,16 +4803,16 @@ EOF fi echo $ac_n "checking for libresolv _dns_ttl_ hack""... $ac_c" 1>&6 -echo "configure:4766: checking for libresolv _dns_ttl_ hack" >&5 +echo "configure:4807: checking for libresolv _dns_ttl_ hack" >&5 cat > conftest.$ac_ext <&5; (eval $ac_link) 2>&5; } && test -s conftest; then +if { (eval echo configure:4816: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest; then rm -rf conftest* echo "$ac_t""yes" 1>&6 cat >> confdefs.h <<\EOF @@ -4787,12 +4828,12 @@ fi rm -f conftest* echo $ac_n "checking if inet_ntoa() actually works""... $ac_c" 1>&6 -echo "configure:4791: checking if inet_ntoa() actually works" >&5 +echo "configure:4832: checking if inet_ntoa() actually works" >&5 if test "$cross_compiling" = yes; then INET_NTOA_RESULT="broken" else cat > conftest.$ac_ext < @@ -4811,7 +4852,7 @@ main () } EOF -if { (eval echo configure:4815: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null +if { (eval echo configure:4856: \"$ac_link\") 1>&5; (eval $ac_link) 2>&5; } && test -s conftest && (./conftest; exit) 2>/dev/null then INET_NTOA_RESULT=`cat conftestval` else @@ -4837,9 +4878,9 @@ fi if test "$ac_cv_header_sys_statvfs_h" = "yes" ; then echo $ac_n "checking for working statvfs() interface""... $ac_c" 1>&6 -echo "configure:4841: checking for working statvfs() interface" >&5 +echo "configure:4882: checking for working statvfs() interface" >&5 cat > conftest.$ac_ext < @@ -4856,7 +4897,7 @@ statvfs("/tmp", &sfs); ; return 0; } EOF -if { (eval echo configure:4860: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:4901: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_func_statvfs=yes else @@ -4876,12 +4917,12 @@ fi fi echo $ac_n "checking for _res.nsaddr_list""... 
$ac_c" 1>&6 -echo "configure:4880: checking for _res.nsaddr_list" >&5 +echo "configure:4921: checking for _res.nsaddr_list" >&5 if eval "test \"`echo '$''{'ac_cv_have_res_nsaddr_list'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:4949: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_res_nsaddr_list="yes" else @@ -4926,12 +4967,12 @@ fi if test $ac_cv_have_res_nsaddr_list = "no" ; then echo $ac_n "checking for _res.ns_list""... $ac_c" 1>&6 -echo "configure:4930: checking for _res.ns_list" >&5 +echo "configure:4971: checking for _res.ns_list" >&5 if eval "test \"`echo '$''{'ac_cv_have_res_ns_list'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 else cat > conftest.$ac_ext <&5; (eval $ac_compile) 2>&5; }; then +if { (eval echo configure:4999: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>&5; }; then rm -rf conftest* ac_cv_have_res_ns_list="yes" else @@ -5116,6 +5157,7 @@ trap 'rm -fr `echo "\ ./auth_modules/PAM/Makefile \ ./auth_modules/SMB/Makefile ./auth_modules/getpwnam/Makefile \ + ./auth_modules/LDAP/Makefile \ include/autoconf.h" | sed "s/:[^ ]*//g"` conftest*; exit 1' 1 2 15 EOF cat >> $CONFIG_STATUS <> $CONFIG_STATUS <<\EOF diff --git a/configure.in b/configure.in index e2ebdee103..f662b49d67 100644 --- a/configure.in +++ b/configure.in @@ -3,13 +3,13 @@ dnl Configuration input file for Squid dnl dnl Duane Wessels, wessels@nlanr.net, February 1996 (autoconf v2.9) dnl -dnl $Id: configure.in,v 1.177 1999/07/13 14:50:57 wessels Exp $ +dnl $Id: configure.in,v 1.178 1999/10/04 05:04:07 wessels Exp $ dnl dnl dnl AC_INIT(src/main.c) AC_CONFIG_HEADER(include/autoconf.h) -AC_REVISION($Revision: 1.177 $)dnl +AC_REVISION($Revision: 1.178 $)dnl AC_PREFIX_DEFAULT(/usr/local/squid) AC_CONFIG_AUX_DIR(cfgaux) @@ -246,8 +246,26 @@ AC_ARG_ENABLE(carp, ]) AC_ARG_ENABLE(async_io, -[ --enable-async-io Do ASYNC disk I/O using threads], -[ if test "$enableval" = "yes" ; then +[ --enable-async-io[=N_THREADS] + Do ASYNC disk I/O using threads. + N_THREADS is the number of worker threads + defaults to 16. See also src/squid.h for + some additional platform tuning], +[ case "$enableval" in + yes) + async_io=yes + ;; + no) + async_io='' + ;; + *) + async_io=yes + AC_DEFINE_UNQUOTED(NUMTHREADS, $enableval) + ;; + esac +]) + +if test -n "$async_io" ; then echo "Async I/O enabled" async_io=yes AC_DEFINE(USE_ASYNC_IO) @@ -263,8 +281,7 @@ AC_ARG_ENABLE(async_io, fi ;; esac - fi -]) +fi AC_SUBST(ASYNC_OBJS) AC_SUBST(SQUID_PTHREAD_LIB) @@ -519,6 +536,20 @@ AC_ARG_ENABLE(truncate, fi ]) +dnl Enable underscore in hostnames +AC_ARG_ENABLE(underscores, +[ --enable-underscores Squid by default rejects any host names with _ + in their name to conform with internet standars. 
+ If you disagree with this you may allow _ in + hostnames by using this switch, provided that + the resolver library on the host where Squid runs + does not reject _ in hostnames...], +[ if test "$enableval" = "yes" ; then + echo "Enabling the use of underscores in host names" + AC_DEFINE(ALLOW_HOSTNAME_UNDERSCORES, 1) + fi +]) + # Force some compilers to use ANSI features # case "$host" in @@ -1265,7 +1296,7 @@ AC_MSG_RESULT($SQUID_TCP_SO_RCVBUF) AC_DEFINE_UNQUOTED(SQUID_TCP_SO_RCVBUF, $SQUID_TCP_SO_RCVBUF) AC_CACHE_CHECK(if sys_errlist is already defined, ac_cv_needs_sys_errlist, - AC_TRY_COMPILE([#include ],[char *s = sys_errlist[0];], + AC_TRY_COMPILE([#include ],[char *s = sys_errlist;], ac_cv_needs_sys_errlist="no", ac_cv_needs_sys_errlist="yes") ) @@ -1431,4 +1462,5 @@ AC_OUTPUT(\ ./auth_modules/PAM/Makefile \ ./auth_modules/SMB/Makefile ./auth_modules/getpwnam/Makefile \ + ./auth_modules/LDAP/Makefile \ ) diff --git a/doc/Programming-Guide/prog-guide.sgml b/doc/Programming-Guide/prog-guide.sgml index 2245dc50d9..cb9ec00531 100644 --- a/doc/Programming-Guide/prog-guide.sgml +++ b/doc/Programming-Guide/prog-guide.sgml @@ -1555,4 +1555,90 @@ Squid consists of the following major components Most of the operations are faster than their "ascii string" equivalents. +File Formats + + +NOTE: this information is current as of version 2.2.STABLE4. + +
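The swap log record documented below is a fixed-size binary structure that Squid writes to disk verbatim. As a hedged sketch only (not part of this commit), a minimal reader might look like the following C program; the MD5_DIGEST_CHARS value of 16 and the "swap.state" file name are assumptions, and the field layout simply mirrors the struct _storeSwapLogData definition that follows.

    #include <stdio.h>
    #include <time.h>

    #define MD5_DIGEST_CHARS 16          /* assumed MD5 key length */

    struct storeSwapLogData {            /* mirrors struct _storeSwapLogData below */
        char op;
        int swap_file_number;
        time_t timestamp;
        time_t lastref;
        time_t expires;
        time_t lastmod;
        size_t swap_file_sz;
        unsigned short refcount;
        unsigned short flags;
        unsigned char key[MD5_DIGEST_CHARS];
    };

    int main(void)
    {
        struct storeSwapLogData e;
        FILE *fp = fopen("swap.state", "rb");   /* log file name is an assumption */
        if (fp == NULL)
            return 1;
        /* Squid writes these structs verbatim, so records are only portable
         * between builds with identical padding and word sizes. */
        while (fread(&e, sizeof(e), 1, fp) == 1)
            printf("op=%d fileno=%08x size=%lu refcount=%u\n",
                   (int) e.op, (unsigned) e.swap_file_number,
                   (unsigned long) e.swap_file_sz, (unsigned) e.refcount);
        fclose(fp);
        return 0;
    }
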

+A +struct _storeSwapLogData { + char op; + int swap_file_number; + time_t timestamp; + time_t lastref; + time_t expires; + time_t lastmod; + size_t swap_file_sz; + u_short refcount; + u_short flags; + unsigned char key[MD5_DIGEST_CHARS]; +}; + + + + + +Note that diff --git a/doc/draft-vixie-htcp-proto-04.txt b/doc/draft-vixie-htcp-proto-04.txt new file mode 100644 index 0000000000..412e2c619b --- /dev/null +++ b/doc/draft-vixie-htcp-proto-04.txt @@ -0,0 +1,793 @@ + + ICP Working Group Paul Vixie + INTERNET-DRAFT ISC + Duane Wessels + NLANR + June, 1999 + + + Hyper Text Caching Protocol (HTCP/0.0) + + + Status of this Memo + + This document is an Internet-Draft and is in full conformance with + all provisions of Section 10 of RFC2026. + + Internet-Drafts are working documents of the Internet Engineering + Task Force (IETF), its areas, and its working groups. Note that + other groups may also distribute working documents as Internet- + Drafts. + + Internet-Drafts are draft documents valid for a maximum of six months + and may be updated, replaced, or obsoleted by other documents at any + time. It is inappropriate to use Internet-Drafts as reference + material or to cite them other than as "work in progress." + + The list of current Internet-Drafts can be accessed at + http://www.ietf.org/ietf/1id-abstracts.txt + + The list of Internet-Draft Shadow Directories can be accessed at + http://www.ietf.org/shadow.html. + + + Abstract + + This document describes HTCP, a protocol for discovering HTTP caches + and cached data, managing sets of HTTP caches, and monitoring cache + activity. This is an experimental protocol, one among several + proposals to perform these functions. + + + + + + + + + + + Expires December 1999 [Page 1] + + INTERNET-DRAFT HTCP June 1999 + + + 1 - Definitions, Rationale and Scope + + 1.1. HTTP/1.1 (see [RFC2068]) permits the transfer of web objects from + ``origin servers'', possibly via ``proxies'' (which are allowed under + some circumstances to ``cache'' such objects for subsequent reuse) to + ``clients'' which consume the object in some way, usually by displaying + it as part of a ``web page.'' HTTP/1.0 and later permit ``headers'' to + be included in a request and/or a response, thus expanding upon the + HTTP/0.9 (and earlier) behaviour of specifying only a URI in the request + and offering only a body in the response. + + 1.2. ICP (see [RFC2186]) permits caches to be queried as to their + content, usually by other caches who are hoping to avoid an expensive + fetch from a distant origin server. ICP was designed with HTTP/0.9 in + mind, such that only the URI (without any headers) is used when + describing cached content, and the possibility of multiple compatible + bodies for the same URI had not yet been imagined. + + 1.3. This document specifies a Hyper Text Caching Protocol (HTCP or + simply HoT CraP) which permits full request and response headers to be + used in cache management, and expands the domain of cache management to + include monitoring a remote cache's additions and deletions, requesting + immediate deletions, and sending hints about web objects such as the + third party locations of cacheable objects or the measured + uncacheability or unavailability of web objects. + + 2 - HTCP Protocol + + 2.1. All multi-octet HTCP protocol elements are transmitted in network + byte order. All RESERVED fields should be set to binary zero by senders + and left unexamined by receivers. Headers must be presented with the + CRLF line termination, as in HTTP. + + 2.2. 
Any hostnames specified should be compatible between sender and + receiver, such that if a private naming scheme (such as HOSTS.TXT or + NIS) is in use, names depending on such schemes will only be sent to + HTCP neighbors who are known to participate in said schemes. Raw + addresses (dotted quad IPv4, or colon-format IPv6) are universal, as are + public DNS names. Use of private names or addresses will require + special operational care. + + 2.3. UDP must be supported. HTCP agents must not be isolated from + NETWORK failures and delays. An HTCP agent should be prepared to act in + useful ways when no response is forthcoming, or when responses are + delayed or reordered or damaged. TCP is optional and is expected to be + + + + Expires December 1999 [Page 2] + + INTERNET-DRAFT HTCP June 1999 + + + used only for protocol debugging. The IANA has assigned port 4827 as + the standard TCP and UDP port number for HTCP. + + 2.4. A set of configuration variables concerning transport + characteristics should be maintained for each agent which is capable of + initiating HTCP transactions, perhaps with a set of per-agent global + defaults. These variables are: + + Maximum number of unacknowledged transactions before a ``failure'' is + imputed. + + Maximum interval without a response to some transaction before a + ``failure'' is imputed. + + Should ICMP-Portunreach be treated as a failure? + + Should RESPONSE=5 && MO=1 be treated as a failure? + + Minimum interval before trying a new transaction after a failure + + 2.5. An HTCP Message has the following general format: + + +---------------------+ + | HEADER | tells message length and protocol versions + +---------------------+ + | DATA | HTCP message (varies per major version number) + +---------------------+ + | AUTH | optional authentication for transaction + +---------------------+ + + + 2.6. An HTCP/*.* HEADER has the following format: + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | LENGTH | + + + + + + + + + + + + + + + + + + + 2: | LENGTH | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | MAJOR | MINOR | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + + + + + + Expires December 1999 [Page 3] + + INTERNET-DRAFT HTCP June 1999 + + + LENGTH is the message length, inclusive of all header and data octets, + including the LENGTH field itself. This field will be equal to + the datagam payload size (``record length'') if a datagram + protocol is in use, and can include padding, i.e., not all + octets of the message need be used in the DATA and AUTH + sections. + + MAJOR is the major version number (0 for this specification). The + DATA section of an HTCP message need not be upward or downward + compatble between different major version numbers. + + MINOR is the minor version number (0 for this specification). Feature + levels and interpretation rules can vary depending on this + field, in particular RESERVED fields can take on new (though + optional) meaning in successive minor version numbers within the + same major version number. + + 2.6.1. It is expected that an HTCP initiator will know the version + number of a prospective HTCP responder, or that the initiator will probe + using declining values for MINOR and MAJOR (beginning with the highest + locally supported value) and locally cache the probed version number of + the responder. + + 2.6.2. Higher MAJOR numbers are to be preferred, as are higher MINOR + numbers within a particular MAJOR number. 
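
   [Editorial aside -- not part of draft-vixie-htcp-proto-04 or of this
   patch.  Sections 2.6.1 and 2.6.2 above describe how an HTCP initiator
   probes a responder with declining MINOR and MAJOR values, starting
   from the highest version it supports locally, and caches whatever the
   responder answers to.  The short C sketch below is one hypothetical
   way to express that rule; the htcp_peer structure, the htcp_probe()
   transport helper, and the maximum-version constants are illustrative
   assumptions, not draft or Squid APIs.]

    /*
     * Illustrative sketch only: version probing per sections 2.6.1/2.6.2.
     * Start at the highest locally supported version, work downward, and
     * locally cache the first version the responder accepts.
     */
    #include <stdio.h>

    #define HTCP_MAX_MAJOR 0        /* highest version this sketch knows */
    #define HTCP_MAX_MINOR 0

    struct htcp_peer {
        const char *name;
        int probed;                 /* non-zero once a version is cached */
        int major, minor;           /* cached responder version */
    };

    /* Hypothetical transport helper: send a NOP with the given header
     * version and report whether a response came back.  A real agent
     * would do UDP I/O here; this stub simply accepts HTCP/0.0. */
    static int
    htcp_probe(struct htcp_peer *p, int major, int minor)
    {
        (void) p;
        return major == 0 && minor == 0;
    }

    static int
    htcp_negotiate_version(struct htcp_peer *p)
    {
        int major, minor;
        if (p->probed)
            return 1;               /* reuse the locally cached answer */
        for (major = HTCP_MAX_MAJOR; major >= 0; major--) {
            for (minor = HTCP_MAX_MINOR; minor >= 0; minor--) {
                if (htcp_probe(p, major, minor)) {
                    p->major = major;   /* prefer the highest that works */
                    p->minor = minor;
                    p->probed = 1;
                    return 1;
                }
            }
        }
        return 0;                   /* no compatible version found */
    }

    int
    main(void)
    {
        struct htcp_peer peer = { "sibling.example.com", 0, 0, 0 };
        if (htcp_negotiate_version(&peer))
            printf("%s speaks HTCP/%d.%d\n", peer.name, peer.major, peer.minor);
        return 0;
    }

   [End of editorial aside; the draft text continues below.]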
+ + 2.7. An HTCP/0.* DATA has the following structure: + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | LENGTH | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | OPCODE | RESPONSE | RESERVED |F1 |RR | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 4: | TRANS-ID | + + + + + + + + + + + + + + + + + + + 6: | TRANS-ID | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 8: | | + / OP-DATA / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + + + + + Expires December 1999 [Page 4] + + INTERNET-DRAFT HTCP June 1999 + + + LENGTH is the number of octets of the message which are reserved for + the DATA section, including the LENGTH field itself. This + number can include padding, i.e., not all octets reserved by + LENGTH need be used in OP-DATA. + + OPCODE is the operation code of an HTCP transaction. An HTCP + transaction can consist of multiple HTCP messages, e.g., a + request (sent by the initiator), or a response (sent by the + responder). + + RESPONSE is a numeric code indicating the success or failure of a + transaction. It should be set to zero (0) by requestors and + ignored by responders. Each operation has its own set of + response codes, which are described later. The overall + message has a set of response codes which are as follows: + + 0 authentication wasn't used but is required + 1 authentication was used but unsatisfactorily + 2 opcode not implemented + 3 major version not supported + 4 minor version not supported (major version is ok) + 5 inappropriate, disallowed, or undesirable opcode + + The above response codes all indicate errors and all depend + for their visibility on MO=1 (as specified below). + + RR is a flag indicating whether this message is a request (0) or + response (1). + + F1 is overloaded such that it is used differently by requestors + than by responders. If RR=0, then F1 is defined as RD. If + RR=1, then F1 is defined as MO. + + RD is a flag which if set to 1 means that a response is desired. + Some OPCODEs require RD to be set to 1 to be meaningful. + + MO (em-oh) is a flag which indicates whether the RESPONSE code is + to be interpreted as a response to the overall message (fixed + fields in DATA or any field of AUTH) [MO=1] or as a response + to fields in the OP-DATA [MO=0]. + + TRANS-ID is a 32-bit value which when combined with the initiator's + network address, uniquely identifies this HTCP transaction. + Care should be taken not to reuse TRANS-ID's within the life- + time of a UDP datagram. + + + + Expires December 1999 [Page 5] + + INTERNET-DRAFT HTCP June 1999 + + + OP-DATA is opcode-dependent and is defined below, per opcode. + + 2.8. 
An HTCP/0.0 AUTH has the following structure: + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | LENGTH | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | SIG-TIME | + + + + + + + + + + + + + + + + + + + 4: | SIG-TIME | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 6: | SIG-EXPIRE | + + + + + + + + + + + + + + + + + + + 8: | SIG-EXPIRE | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 10: | | + / KEY-NAME / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + n: | | + / SIGNATURE / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + LENGTH is the number of octets used by the AUTH, including the + LENGTH field itself. If the optional AUTH is not being + transmitted, this field should be set to 2 (two). LENGTH + can include padding, which means that not all octets + reserved by LENGTH will necessarily be consumed by + SIGNATURE. + + SIG-TIME is an unsigned binary count of the number of seconds since + 00:00:00 1-Jan-70 UTC at the time the SIGNATURE is + generated. + + SIG-EXPIRE is an unsigned binary count of the number of seconds since + 00:00:00 1-Jan-70 UTC at the time the SIGNATURE is + considered to have expired. + + KEY-NAME is a COUNTSTR [3.1] which specifies the name of a shared + secret. (Each HTCP implementation is expected to allow + configuration of several shared secrets, each of which will + have a name.) + + + + Expires December 1999 [Page 6] + + INTERNET-DRAFT HTCP June 1999 + + + SIGNATURE is a COUNTSTR [3.1] which holds the HMAC-MD5 digest (see + [RFC 2104]), with a B value of 64, of the following + elements, each of which is digested in its ``on the wire'' + format, including transmitted padding if any is covered by a + field's associated LENGTH: + + IP SRC ADDR [4 octets] + IP SRC PORT [2 octets] + IP DST ADDR [4 octets] + IP DST PORT [2 octets] + HTCP MAJOR version number [1 octet] + HTCP MINOR version number [1 octet] + SIG-TIME [4 octets] + SIG-EXPIRE [4 octets] + HTCP DATA [variable] + KEY-NAME (the whole COUNTSTR [3.1]) [variable] + + + 2.8.1. Shared secrets should be cryptorandomly generated and should be + at least a few hundred octets in size. + + 3 - Data Types + + HTCP/0.* data types are defined as follows: + + 3.1. COUNTSTR is a counted string whose format is: + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | LENGTH | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | | + / TEXT / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + LENGTH is the number of octets which will follow in TEXT. This field + is *not* self-inclusive as is the case with other HTCP LENGTH + fields. + + TEXT is a stream of uninterpreted octets, usually ISO8859-1 + ``characters''. + + + + + + Expires December 1999 [Page 7] + + INTERNET-DRAFT HTCP June 1999 + + + 3.2. SPECIFIER is used with the TST and CLR request messages, defined + below. Its format is: + + +---------------------+ + | METHOD | : COUNTSTR + +---------------------+ + | URI | : COUNTSTR + +---------------------+ + | VERSION | : COUNTSTR + +---------------------+ + | REQ-HDRS | : COUNTSTR + +---------------------+ + + + METHOD (Since HTCP only returns headers, methods GET and HEAD are + equivilent.) + + URI (If the URI is a URL, it should always include a ``:'' + specifier, but in its absense, port 80 should be imputed by a + receiver.) 
+ + VERSION is an entire HTTP version string such as ``HTTP/1.1''. + VERSION strings with prefixes other than ``HTTP/'' or with + version numbers less than ``1.1'' are outside the domain of + this specification. + + REQ-HDRS are those presented by an HTTP initiator. These headers + should include end-to-end but NOT hop-by-hop headers, and they + can be canonicalized (aggregation of ``Accept:'' is permitted, + for example.) + + 3.3. DETAIL is used with the TST response message, defined below. Its + format is: + + +---------------------+ + | RESP-HDRS | : COUNTSTR + +---------------------+ + | ENTITY-HDRS | : COUNTSTR + +---------------------+ + | CACHE-HDRS | : COUNTSTR + +---------------------+ + + + + + + + + Expires December 1999 [Page 8] + + INTERNET-DRAFT HTCP June 1999 + + + 3.4. IDENTITY is used with the MON request and SET response message, + defined below. Its format is: + + +---------------------+ + | SPECIFIER | + +---------------------+ + | DETAIL | + +---------------------+ + + + 4 - Cache Headers + + HTCP/0.0 CACHE-HDRS consist of zero or more of the following headers: + + Cache-Vary: ... + The sender of this header has learned that content varies on a set of + headers different from the set given in the object's Vary: header. + Cache-Vary:, if present, overrides the object's Vary: header. + + Cache-Location: : ... + The sender of this header has learned of one or more proxy caches who + are holding a copy of this object. Probing these caches with HTCP + may result in discovery of new, close-by (preferrable to current) + HTCP neighbors. + + Cache-Policy: [no-cache] [no-share] [no-cache-cookie] + The sender of this header has learned that the object's caching + policy has more detail than is given in its response headers. + + no-cache means that it is uncacheable (no reason given), + but may be shareable between simultaneous + requestors. + + no-share means that it is unshareable (no reason given), + and per-requestor tunnelling is always required). + + no-cache-cookie means that the content could change as a result of + different, missing, or even random cookies being + included in the request headers, and that caching + is inadvisable. + + + + + + + + + Expires December 1999 [Page 9] + + INTERNET-DRAFT HTCP June 1999 + + + Cache-Flags: [incomplete] + The sender of this header has modified the object's caching policy + locally, such that requesters may need to treat this response + specially, i.e., not necessarily in accordance with the object's + actual policy. + + incomplete means that the response headers and/or entity + headers given in this response are not known to be + complete, and may not be suitable for use as a + cache key. + + + Cache-Expiry: + The sender of this header has learned that this object should be + considered to have expired at a time different than that indicate by + its response headers. The format is the same as HTTP/1.1 Expires:. + + Cache-MD5: + The sender of this header has computed an MD5 checksum for this + object which is either different from that given in the object's + Content-MD5: header, or is being supplied since the object has no + Content-MD5 header. The format is the same as HTTP/1.1 Content-MD5:. + + Cache-to-Origin: + The sender of this header has measured the round trip time to an + origin server (given as a hostname or literal address). The is + the average number of seconds, expressed as decimal ASCII with + arbitrary precision and no exponent. is the number of RTT + samples which have had input to this average. 
is the number + of routers between the cache and the origin, expressed as decimal + ASCII with arbitrary precision and no exponent, or 0 if the cache + doesn't know. + + 6 - HTCP Operations + + HTCP/0.* opcodes and their respective OP-DATA are defined below: + + 6.1. NOP (OPCODE 0): + + This is an HTCP-level ``ping.'' Responders are encouraged to process + NOP's with minimum delay since the requestor may be using the NOP RTT + (round trip time) for configuration or mapping purposes. The RESPONSE + code for a NOP is always zero (0). There is no OP-DATA for a NOP. NOP + requests with RD=0 cause no processing to occur at all. + + + + + Expires December 1999 [Page 10] + + INTERNET-DRAFT HTCP June 1999 + + + 6.2. TST (OPCODE 1): + + Test for the presence of a specified content entity in a proxy cache. + TST requests with RD=0 cause no processing to occur at all. + + TST requests have the following OP-DATA: + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | | + / SPECIFIER / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + RESPONSE codes for TST are as follows: + + 0 entity is present in responder's cache + 1 entity is not present in responder's cache + + TST responses have the following OP-DATA, if RESPONSE is zero (0): + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | | + / DETAIL / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + TST responses have the following OP-DATA, if RESPONSE is one (1): + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | | + / CACHE-HDRS / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + DETAIL is a set of cache, entity, and response headers. The cache + headers are described above. Entity and response headers are + defined by HTTP. + + + + + + + Expires December 1999 [Page 11] + + INTERNET-DRAFT HTCP June 1999 + + + 6.3. MON (OPCODE 2): + + Monitor activity in a proxy cache's local object store (adds, deletes, + replacements, etc). Since interleaving of HTCP transaction over a + single pair of UDP endpoints is not supported, it is recommended that a + unique UDP endpoint be allocated by the requestor for each concurrent + MON request. MON requests with RD=0 are equivilent to those with RD=1 + and TIME=0; that is, they will cancel any outstanding MON transaction. + + MON requests have the following OP-DATA structure: + + +0 (MSB) + +---+---+---+---+---+---+---+---+ + 0: | TIME | + +---+---+---+---+---+---+---+---+ + + + TIME is the number of seconds of monitoring output desired by the + initiator. Subsequent MON requests from the same initiator with + the same TRANS-ID should update the time on a ongoing MON + transaction. This is called ``overlapping renew.'' + + RESPONSE codes for MON are as follows: + + 0 accepted, OP-DATA is present and valid + 1 refused (quota error -- too many MON's are active) + + + MON responses have the following OP-DATA structure, if RESPONSE is zero + (0): + + +0 (MSB) +1 (LSB) + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | TIME | ACTION | REASON | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | | + / IDENTITY / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + TIME is the number of seconds remaining for this MON transaction. 
+ + + + + + + Expires December 1999 [Page 12] + + INTERNET-DRAFT HTCP June 1999 + + + ACTION is a numeric code indicating a cache population action. Codes + are: + + 0 an entity has been added to the cache + 1 an entity in the cache has been refreshed + 2 an entity in the cache has been replaced + 3 an entity in the cache has been deleted + + + REASON is a numeric code indicating the reason for an ACTION. Codes + are: + + 0 some reason not covered by the other REASON codes + 1 a proxy client fetched this entity + 2 a proxy client fetched with caching disallowed + 3 the proxy server prefetched this entity + 4 the entity expired, per its headers + 5 the entity was purged due to caching storage limits + + + 6.4. SET (OPCODE 3): + + Inform a cache of the identity of an object. This is a ``push'' + transaction, whereby cooperating caches can share information such as + updated Age/Date/Expires headers (which might result from an origin + ``304 Not modified'' HTTP response) or updated cache headers (which + might result from the discovery of non-authoritative ``vary'' conditions + or from learning of second or third party cache locations for this + entity. RD is honoured. + + SET requests have the following OP-DATA structure: + + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | | + / IDENTITY / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + RESPONSE codes are as follows: + + 0 identity accepted, thank you + 1 identity ignored, no reason given, thank you + + + + + + Expires December 1999 [Page 13] + + INTERNET-DRAFT HTCP June 1999 + + + SET responses have no OP-DATA. + + 6.5. CLR (OPCODE 4): + + Tell a cache to completely forget about an entity. RD is honoured. + + CLR requests have the following OP-DATA structure: + + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 0: | RESERVED | REASON | + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + 2: | | + / SPECIFIER / + / / + +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+ + + + REASON is a numeric code indicating the reason why the requestor is + asking that this entity be removed. The codes are as follows: + + 0 some reason not better specified by another code + 1 the origin server told me that this entity does not exist + + + RESPONSE codes are as follows: + + 0 i had it, it's gone now + 1 i had it, i'm keeping it, no reason given. + 2 i didn't have it + + + CLR responses have no OP-DATA. + + Clearing a URI without specifying response, entity, or cache headers + means to clear all entities using that URI. + + 7 - Security Considerations + + If the optional AUTH element is not used, it is possible for + unauthorized third parties to both view and modify a cache using the + HTCP protocol. + + + + + + + + Expires December 1999 [Page 14] + + INTERNET-DRAFT HTCP June 1999 + + + 8 - Acknowledgements + + Mattias Wingstedt of Idonex brought key insights to the development of + this protocol. David Hankins helped clarify this document. + + 9 - References + + [RFC1630] + T. Berners-Lee, ``Universal Resource Identifiers in WWW,'', RFC 1630, + CERN, June 1994. + + [RFC2068] + R. Fielding, J. Gettys, J. Mogul, H. Frystyk, T. Berners-Lee, + ``Hypertext Transfer Protocol -- HTTP/1.1,'' RFC 2068, UC Irvine, + DEC, MIT/LCS, January 1997. + + [RFC2104] + H. Krawczyk, M. Bellare, R. Canetti, ``HMAC: Keyed-Hashing for + Message Authentication,'' RFC 2104, IBM and UCSD, February, 1997. + + [RFC2186] + D. Wessels, K. 
Claffy, ``Internet Cache Protocol (ICP), version 2,'' + RFC 2186, National Laboratory for Applied Network Research/UCSD, + September 1997. + + 10 - Author's Address + + + Paul Vixie + Internet Software Consortium + 950 Charter Street + Redwood City, CA 94063 + +1 650 779 7001 + + + + Duane Wessels + National Lab for Applied Network Research + USCD, 9500 Gilman Drive + La Jolla, CA 92093 + +1 303 497 1822 + + + + + + + + Expires December 1999 [Page 15] + diff --git a/errors/Czech/ERR_ACCESS_DENIED b/errors/Czech/ERR_ACCESS_DENIED index 78dc842e3a..5719302a1e 100644 --- a/errors/Czech/ERR_ACCESS_DENIED +++ b/errors/Czech/ERR_ACCESS_DENIED @@ -15,10 +15,11 @@ se objevila n Access Denied. -

-Pøistup k dokumentu byl stornován v dùsledku nedostatèných pøístupových -práv. Pokud jste pøesvìdèeni, ¾e se jedná o chybu, kontaktujte správce -vzdáleného serveru.

+

+Pøístup k dokumentu byl stornován v dùsledku nedostateèných pøístupových +práv. Pokud jste pøesvìdèeni, ¾e se jedná o chybu, kontaktujte +správce vzdáleného serveru. +

diff --git a/errors/Czech/ERR_CACHE_ACCESS_DENIED b/errors/Czech/ERR_CACHE_ACCESS_DENIED index 3e702eccfc..406a009937 100644 --- a/errors/Czech/ERR_CACHE_ACCESS_DENIED +++ b/errors/Czech/ERR_CACHE_ACCESS_DENIED @@ -1,6 +1,6 @@ -CHYBA: Pristup ke cache serveru odmitnut +CHYBA: Pristup ke cache serveru zamítnut

CHYBA

@@ -19,7 +19,7 @@ Cache Access Denied.

-

Olouváme se, ale pøístup k: +

Omlouváme se, ale pøístup k:

    %U
je povolen a¾ po autorizaci.

@@ -28,7 +28,7 @@ je povolen a Pro úspì¹nou autorizaci potøebujete prohlí¾eèe NETSCAPE 2.0 a vy¹¹í, Microsoft Internet Explorer 3.0 nebo prohlí¾eè podporující protokol -HTTP/1.1. V pøípadì problému se prosím obra»te nacache +HTTP/1.1. V pøípadì problému se prosím obra»te na cache administratora nebo si zmìòte -heslo. +heslo.

diff --git a/errors/Czech/ERR_CACHE_MGR_ACCESS_DENIED b/errors/Czech/ERR_CACHE_MGR_ACCESS_DENIED index e820a0f93b..0f27a23c48 100644 --- a/errors/Czech/ERR_CACHE_MGR_ACCESS_DENIED +++ b/errors/Czech/ERR_CACHE_MGR_ACCESS_DENIED @@ -1,10 +1,10 @@ -CHYBA: Cache Manager Access Denied +CHYBA: Pristup ke cache manazeru zamitnut

CHYBA

-

Cache Manager Access Denied

+

Pøístup ke cache mana¾eru zamítnut


Pøi pokusu o pøístup k: @@ -27,7 +27,7 @@ je dostupn

Pro úspì¹nou autorizaci potøebujete prohlí¾eèe NETSCAPE 2.0 a vy¹¹í, Microsoft Internet Explorer 3.0 nebo prohlí¾eè podporující protokol -HTTP/1.1. V pøípadì problému se prosím obra»te nacache +HTTP/1.1. V pøípadì problému se prosím obra»te na cache administratora nebo si zmìòte -heslo. -heslo. +

\ No newline at end of file diff --git a/errors/Czech/ERR_CANNOT_FORWARD b/errors/Czech/ERR_CANNOT_FORWARD index 9fb2437a09..f95d5cc34f 100644 --- a/errors/Czech/ERR_CANNOT_FORWARD +++ b/errors/Czech/ERR_CANNOT_FORWARD @@ -13,7 +13,7 @@ se objevila n
  • -Po¾adavek není mo¾no pøemìrovat. +Unable to forward this request at this time.
diff --git a/errors/Czech/ERR_CONNECT_FAIL b/errors/Czech/ERR_CONNECT_FAIL index 748adc8576..f8280bcf64 100644 --- a/errors/Czech/ERR_CONNECT_FAIL +++ b/errors/Czech/ERR_CONNECT_FAIL @@ -13,7 +13,7 @@ se objevila n
  • -Connection Failed +Connection Failed.
diff --git a/errors/Czech/ERR_DNS_FAIL b/errors/Czech/ERR_DNS_FAIL index fa9d2be199..1bbc332733 100644 --- a/errors/Czech/ERR_DNS_FAIL +++ b/errors/Czech/ERR_DNS_FAIL @@ -10,7 +10,7 @@ P

se objevila následující chyba:

-Jméno serveru není mo¾no pøevést na IP adresu +Jméno serveru není mo¾no pøevést na IP adresu. %H
@@ -23,9 +23,11 @@ DNS server odpov

To znamená ¾e: -

+

+

    +
  • Cache server nebyl schopen získat adresu serveru uvedeného v URL. Zkontrolujte prosím adresu. -
+

diff --git a/errors/Czech/ERR_FORWARDING_DENIED b/errors/Czech/ERR_FORWARDING_DENIED index 9e555dbef5..582c9002eb 100644 --- a/errors/Czech/ERR_FORWARDING_DENIED +++ b/errors/Czech/ERR_FORWARDING_DENIED @@ -18,7 +18,7 @@ Forwarding Denied.

-Cache server nepøesmìruje Vá¹ po¾adavek, proto¾e by to nedovolují vztahy v -hierarchické struktuøe cache serverù. %i je pravd¾podobnì chybnì +Cache server nepøesmìruje Vá¹ po¾adavek, proto¾e to nedovolují vztahy v +hierarchické struktuøe cache serverù. %i je pravdìpodobnì chybnì nakonfigurovaný cache server.

diff --git a/errors/Czech/ERR_FTP_DISABLED b/errors/Czech/ERR_FTP_DISABLED index 76bd902fd3..ffd4a34ccd 100644 --- a/errors/Czech/ERR_FTP_DISABLED +++ b/errors/Czech/ERR_FTP_DISABLED @@ -1,6 +1,5 @@ -heslo. CHYBA: Pozadovany dokument je nedostupny

CHYBA

@@ -14,7 +13,7 @@ se objevila n
  • -FTP is Disabled +FTP is Disabled.
diff --git a/errors/Czech/ERR_FTP_FAILURE b/errors/Czech/ERR_FTP_FAILURE index c80deb1e61..9aaf33c311 100644 --- a/errors/Czech/ERR_FTP_FAILURE +++ b/errors/Czech/ERR_FTP_FAILURE @@ -10,7 +10,7 @@ P %U se objevila chyba FTP.

-Squid odeslal následující FTP pøíkaz: +Squid odeslal následující FTP pøíkaz:

%f
a obdr¾el tuto odpovìï:
%F
diff --git a/errors/Czech/ERR_FTP_FORBIDDEN b/errors/Czech/ERR_FTP_FORBIDDEN index 5f1b5e9843..929590757e 100644 --- a/errors/Czech/ERR_FTP_FORBIDDEN +++ b/errors/Czech/ERR_FTP_FORBIDDEN @@ -1,17 +1,18 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

ERROR

-

The requested URL could not be retrieved

+

CHYBA

+

Po¾adovaný dokument je nedostupný


-An FTP authentication failure occurred -while trying to retrieve the URL: +Pøi pokusu o pøístup k URL: %U +se objevila chyba autorizace FTP.

-Squid sent the following FTP command: +Squid odeslal následující FTP pøíkaz:

%f
-and then received this reply +a obdr¾el tuto odpovìï:
%F
%g

diff --git a/errors/Czech/ERR_FTP_NOT_FOUND b/errors/Czech/ERR_FTP_NOT_FOUND index a9b3f04406..3a76152a5d 100644 --- a/errors/Czech/ERR_FTP_NOT_FOUND +++ b/errors/Czech/ERR_FTP_NOT_FOUND @@ -1,21 +1,22 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

ERROR

-

The requested URL could not be retrieved

+

CHYBA

+

Po¾adovaný dokument je nedostupný


-The following URL could not be retrieved: +Po¾adovaný dokument na adrese: %U +nebyl nalezen.

-Squid sent the following FTP command: +Squid odeslal následující FTP pøíkaz:

%f
-and then received this reply +a obdr¾el tuto odpovìï:
%F
%g

-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +To mù¾e být zapøíèinìno pou¾itím absolutní cesty v FTP URL (co¾ odporuje RFC +1738). V tomto pøípadì mù¾e být dokument nalezen na %B.

diff --git a/errors/Czech/ERR_FTP_PUT_ERROR b/errors/Czech/ERR_FTP_PUT_ERROR index ace7f7fdce..00d7bb4386 100644 --- a/errors/Czech/ERR_FTP_PUT_ERROR +++ b/errors/Czech/ERR_FTP_PUT_ERROR @@ -1,8 +1,16 @@ - CHYBA: neuspesny FTP upload -

CHYBA

FTP PUT/upload neúspì¹ný


Pøi -pokusu o PUT následujícího URL: %U

odeslal Squid -následující FTP pøíkaz:


+
+
+CHYBA: neuspesny FTP upload 
+
+

CHYBA

+

FTP PUT/upload neúspì¹ný

+
+

+Pøi pokusu o PUT následujícího URL: +%U +

+odeslal Squid následující FTP pøíkaz: +


         %f
 
a obdr¾el tuto odpovìï @@ -11,8 +19,10 @@ a obdr

Zkuste: -

+

+

    +
  • zkontrolovat cestu, pøístupová práva, volné místo na disku a zkuste to znova. -
+

diff --git a/errors/Czech/ERR_FTP_UNAVAILABLE b/errors/Czech/ERR_FTP_UNAVAILABLE index d330368f3b..c5adb28b7a 100644 --- a/errors/Czech/ERR_FTP_UNAVAILABLE +++ b/errors/Czech/ERR_FTP_UNAVAILABLE @@ -1,16 +1,17 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

ERROR

-

The requested URL could not be retrieved

+

CHYBA

+

Po¾adovaný dokument je nedostupný


-The FTP server was too busy while trying to retrieve the URL: +FTP server je pøetí¾en a nemù¾e poslat nasledující dokument: %U

-Squid sent the following FTP command: +Squid odeslal následující FTP pøíkaz:

%f
-and then received this reply +a obdr¾el tuto odpovìï:
%F
%g

diff --git a/errors/Czech/ERR_INVALID_REQ b/errors/Czech/ERR_INVALID_REQ index 70e1eb2f9b..8e51ebf355 100644 --- a/errors/Czech/ERR_INVALID_REQ +++ b/errors/Czech/ERR_INVALID_REQ @@ -6,7 +6,6 @@

Po¾adovaný dokument je nedostupný


-While trying to process the request: Pøi provádìní po¾adavku:

 %R
@@ -16,18 +15,18 @@ se objevila n
 
  • -Invalid Request +Invalid Request.

-Èáast HTTP po¾adavku je chybná: +Èást HTTP po¾adavku je chybná:

  • Chybná nebo chybìjící HTTP metoda (POST, GET)
  • Chybìjící URL
  • Chybìjící identifikátor HTTP (HTTP/1.0)
  • Po¾adavek mù¾e být pøíli¹ velký
  • Chybìjící polo¾ka Content-Lenght pro po¾adavky POST a GET -
  • Zakázaný znak v hostnam; (podtr¾ítko ??) +
  • Chybný znak v hostname (podtr¾ítko ??)

diff --git a/errors/Czech/ERR_INVALID_URL b/errors/Czech/ERR_INVALID_URL index 47cb36e45a..22c93b9d02 100644 --- a/errors/Czech/ERR_INVALID_URL +++ b/errors/Czech/ERR_INVALID_URL @@ -13,7 +13,7 @@ se objevila n
  • -Invalid URL +Invalid URL.
@@ -23,6 +23,6 @@ N
  • Chybný nebo chybìjící protokol (http:// ..)
  • Chybìjící hostname
  • Chybný double-escape v URL cestì -
  • Chybný znak v hostname (podtr¾ítko _) +
  • Chybný znak v hostname (podtr¾ítko ??)

    diff --git a/errors/Czech/ERR_LIFETIME_EXP b/errors/Czech/ERR_LIFETIME_EXP index 0b3661059d..9cf30b66a7 100644 --- a/errors/Czech/ERR_LIFETIME_EXP +++ b/errors/Czech/ERR_LIFETIME_EXP @@ -13,11 +13,10 @@ se objevila n
    • -Connection Lifetime Expired +Connection Lifetime Expired.

    -Squid stornoval po¾adavek z dùvodu pøekoroèní maximální délky trvání -spojení. +Squid stornoval po¾adavek z dùvodu pøekroèení maximální délky trvání spojení.

    diff --git a/errors/Czech/ERR_NO_RELAY b/errors/Czech/ERR_NO_RELAY index ef0f901e04..91e197b6ba 100644 --- a/errors/Czech/ERR_NO_RELAY +++ b/errors/Czech/ERR_NO_RELAY @@ -13,10 +13,10 @@ se objevila n
    • -No Wais Relay +No Wais Relay.

    -Cache server nema definovaný WAIS Relay! Vyhubujte administratorovi. -

    \ No newline at end of file +Cache server nemá definovaný WAIS Relay! Vyhubujte administratorovi. +

    \ No newline at end of file diff --git a/errors/Czech/ERR_ONLY_IF_CACHED_MISS b/errors/Czech/ERR_ONLY_IF_CACHED_MISS index 19bcb3c973..c3d92f4212 100644 --- a/errors/Czech/ERR_ONLY_IF_CACHED_MISS +++ b/errors/Czech/ERR_ONLY_IF_CACHED_MISS @@ -13,15 +13,9 @@ se objevila n
    • -Po¾adovaný dokument se nenachází v cachi pøièm¾ byla specifikována direktiva -only-if-cached. +Po¾adovaný dokument se nenachází v cachi, pøièem¾ byla specifikována direktiva +only-if-cached.
    -

    - -You have issued a request with a only-if-cached cache control -directive. The document was not found in the cache, or it required -revalidation prohibited by only-if-cached directive. -

    diff --git a/errors/Czech/ERR_READ_ERROR b/errors/Czech/ERR_READ_ERROR index c33ed7f14b..4bb8875d32 100644 --- a/errors/Czech/ERR_READ_ERROR +++ b/errors/Czech/ERR_READ_ERROR @@ -13,7 +13,7 @@ se objevila n
    • -Read Error +Read Error.
    diff --git a/errors/Czech/ERR_READ_TIMEOUT b/errors/Czech/ERR_READ_TIMEOUT index a24eae5001..6adcf0f859 100644 --- a/errors/Czech/ERR_READ_TIMEOUT +++ b/errors/Czech/ERR_READ_TIMEOUT @@ -13,7 +13,7 @@ se objevila n
    • -Read Timeout +Read Timeout.
    diff --git a/errors/Czech/ERR_SHUTTING_DOWN b/errors/Czech/ERR_SHUTTING_DOWN index fa3ed7650c..f258d51e0f 100644 --- a/errors/Czech/ERR_SHUTTING_DOWN +++ b/errors/Czech/ERR_SHUTTING_DOWN @@ -3,15 +3,19 @@ CHYBA: Pozadovany dokument je nedostupny

    CHYBA

    -

    Poøadovaný dokument je nedostupný

    +

    Po¾adovaný dokument je nedostupný


    Pøi pokusu o pøístup k: %U

    se objevila následující chyba: -

      +

        +
      • + Tento cache server je právì restartován a nemù¾e obslou¾it Vá¹ po¾adavek. Zkuste to za chvíli znova. + +

      diff --git a/errors/Czech/ERR_SOCKET_FAILURE b/errors/Czech/ERR_SOCKET_FAILURE index 16a35ba138..63c6103f1e 100644 --- a/errors/Czech/ERR_SOCKET_FAILURE +++ b/errors/Czech/ERR_SOCKET_FAILURE @@ -13,7 +13,7 @@ se objevila n
      • -Socket Failure +Socket Failure.
      diff --git a/errors/Czech/ERR_TOO_BIG b/errors/Czech/ERR_TOO_BIG index 337538ee43..54fefcdb50 100644 --- a/errors/Czech/ERR_TOO_BIG +++ b/errors/Czech/ERR_TOO_BIG @@ -1,26 +1,24 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

      ERROR

      -

      The requested URL could not be retrieved

      +

      CHYBA

      +

      Po¾adovaný dokument je nedostupný


      -

      -While trying to retrieve the URL: +Pøi pokusu o pøístup k: %U

      -The following error was encountered: +se objevila následující chyba:

      • The request or reply is too large. -

        -If you are making a POST or PUT request, then your request body -(the thing you are trying to upload) is too large. If you are -making a GET request, then the reply body (what you are trying -to download) is too large. These limits have been established -by the Internet Service Provider who operates this cache. Please -contact them directly if you feel this is an error.

      -Your cache administrator is %w. +Pøi pou¾ití metody POST, PUT nebo GET byl po¾adovaný dokument pøíli¹ +velký a pøekroèil mo¾ný limit povolený k pøenosu (pravdìpodobnì jste +se sna¾ili o posílání/pøíjem dokumentu na server nebo ze serveru). +Tento limit byl nastaven administrátorem této cache. Pokud si myslíte, +¾e je potøeba tyto limity zmìnit, kontaktujte ho. +

      \ No newline at end of file diff --git a/errors/Czech/ERR_UNSUP_REQ b/errors/Czech/ERR_UNSUP_REQ index 608b7796ec..2bc19a4856 100644 --- a/errors/Czech/ERR_UNSUP_REQ +++ b/errors/Czech/ERR_UNSUP_REQ @@ -13,12 +13,12 @@ se objevila n
      • -Unsupported Request Method and Protocol +Unsupported Request Method and Protocol.

      Squid nepodporuje v¹echny typy metod u v¹ech protokolù. Napø. není mo¾no -pou¾it metodu POST u slu¾by GOPHER +pou¾it metodu POST u slu¾by GOPHER.

      diff --git a/errors/Czech/ERR_URN_RESOLVE b/errors/Czech/ERR_URN_RESOLVE index 9225b62ac9..0752eb4c9c 100644 --- a/errors/Czech/ERR_URN_RESOLVE +++ b/errors/Czech/ERR_URN_RESOLVE @@ -1,22 +1,22 @@ -CHYBA: Po¾adované URN je nedostupné +CHYBA: Po¾adované URN je nedostupné

      CHYBA

      -

      A URL pro po¾adované URN je nedostupné

      +

      URL pro po¾adované URN je nedostupné


      -Pøi pokusu o pøístup k URN: +Pøi pokusu o pøístup k URN: %U

      se objevila následující chyba:

      • -Cannot Resolve URN +Cannot Resolve URN.

      -Hey, don't expect too much from URNs on %T :) +Hey, neoèekáváte pøíli¹ mnoho od URN na %T :)

      diff --git a/errors/Czech/ERR_WRITE_ERROR b/errors/Czech/ERR_WRITE_ERROR index 56d3af4c19..8c889906fd 100644 --- a/errors/Czech/ERR_WRITE_ERROR +++ b/errors/Czech/ERR_WRITE_ERROR @@ -13,7 +13,7 @@ se objevila n
      • -Write Error +Write Error.
      @@ -22,5 +22,5 @@ Syst
          %E

      -Chyba zápisu na sí». Opakjte prosím po¾adavek. +Chyba zápisu na sí». Opakujte prosím po¾adavek.

      diff --git a/errors/Czech/ERR_ZERO_SIZE_OBJECT b/errors/Czech/ERR_ZERO_SIZE_OBJECT index 164337c1b7..9b708524ec 100644 --- a/errors/Czech/ERR_ZERO_SIZE_OBJECT +++ b/errors/Czech/ERR_ZERO_SIZE_OBJECT @@ -13,10 +13,10 @@ se objevila n
      • -Zero Sized Reply +Zero Sized Reply.

      -Squid neobdr¾el v opdovìdi na tento dotaz ¾ádná data. +Squid neobdr¾el v odpovìdi na tento dotaz ¾ádná data.

      diff --git a/errors/Czech/README b/errors/Czech/README index 055ab1618c..d8ababf0fd 100644 --- a/errors/Czech/README +++ b/errors/Czech/README @@ -1,2 +1,2 @@ -Thank you to Jakub Nantl for -creating these error pages in Czech! +Thank you to Jakub Nantl and Radek Malcic + for creating these error pages in Czech! diff --git a/errors/French/ERR_FTP_FORBIDDEN b/errors/French/ERR_FTP_FORBIDDEN index 5f1b5e9843..746563684f 100644 --- a/errors/French/ERR_FTP_FORBIDDEN +++ b/errors/French/ERR_FTP_FORBIDDEN @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved +ERREUR: L'URL demandée n'a pu être chargée -

      ERROR

      -

      The requested URL could not be retrieved

      +

      ERREUR

      +

      L'URL demandée n'a pu être chargée


      -An FTP authentication failure occurred -while trying to retrieve the URL: +Une erreur d'authentification sur un FTP a eu lieu. +En tentant de charger l'URL: %U

      -Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:

      %f
      -and then received this reply +et a recu en réponse
      %F
      %g

      diff --git a/errors/French/ERR_FTP_NOT_FOUND b/errors/French/ERR_FTP_NOT_FOUND index a9b3f04406..86b14309ca 100644 --- a/errors/French/ERR_FTP_NOT_FOUND +++ b/errors/French/ERR_FTP_NOT_FOUND @@ -1,21 +1,22 @@ -ERROR: The requested URL could not be retrieved +ERREUR: L'URL demandée n'a pu être chargée -

      ERROR

      -

      The requested URL could not be retrieved

      +

      ERREUR

      +

      L'URL demandée n'a pu être chargée


      -The following URL could not be retrieved: +L'URL suivante n'a pu être chargée: %U

      -Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:

      %f
      -and then received this reply +et a recu en retour:
      %F
      %g

      -This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Ceci pourrait etre causé par une URL de FTP avec un +chemin absolu (ce qui n'est pas conforme au RFC 1738) . Si c'est +effectivement le cas, alors le fichier se trouve à +l'adresse %B.

      diff --git a/errors/French/ERR_FTP_UNAVAILABLE b/errors/French/ERR_FTP_UNAVAILABLE index d330368f3b..6d0e2c659a 100644 --- a/errors/French/ERR_FTP_UNAVAILABLE +++ b/errors/French/ERR_FTP_UNAVAILABLE @@ -1,16 +1,16 @@ -ERROR: The requested URL could not be retrieved +ERREUR: L'URL demandée n'a pu être chargée -

      ERROR

      -

      The requested URL could not be retrieved

      +

      ERREUR

      +

      L'URL demandée n'a pu être chargée.


      -The FTP server was too busy while trying to retrieve the URL: +Le serveur FTP était trop encombré pour charger l'URL: %U

      -Squid sent the following FTP command: +Squid a envoyé la commande FTP suivante:

      %f
      -and then received this reply +et a recu la réponse suivante en retour:
      %F
      %g

      diff --git a/errors/French/ERR_SHUTTING_DOWN b/errors/French/ERR_SHUTTING_DOWN index 03ca2ceb0c..b9314cfaec 100644 --- a/errors/French/ERR_SHUTTING_DOWN +++ b/errors/French/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -ERREUR: La requete demandee n'a pu etre traitée +ERREUR: La requete demandée n'a pu etre effectuée

      ERREUR

      -

      La requete demandée n'a pu etre traitée

      +

      La requete demandée n'a pu etre effectuée


      -En tentant de résoudre l'URL: +En essayant de charger l'URL: %U

      -L'erreur suivante a été rencontrée: +L'erreur suivante fut rencontrée:

        -La mise hors-service de ce cache est en cours et il est impossible -de traiter votre requete actuellement. Veuillez -réitérer votre requete ultérieurement. +Ce cache est en cours de mise hors-service temporaire et il +lui est impossible de satisfaire votre requete actuellement. +Veuillez réiterer votre requete ultérieurement.

        diff --git a/errors/Italian/ERR_FTP_FORBIDDEN b/errors/Italian/ERR_FTP_FORBIDDEN index 5f1b5e9843..825aa19f2e 100644 --- a/errors/Italian/ERR_FTP_FORBIDDEN +++ b/errors/Italian/ERR_FTP_FORBIDDEN @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved +ERRORE: La URL richiesta non può essere recuperata

        ERROR

        -

        The requested URL could not be retrieved

        +

        La URL richiesta non può essere recuperata


        -An FTP authentication failure occurred -while trying to retrieve the URL: +Un errore di autenticazione tramite FTP è avvenuto +mentre si cercava di recuperare la URL: %U

        -Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:

        %f
        -and then received this reply +e ha ricevuto la seguente risposta:
        %F
        %g

        diff --git a/errors/Italian/ERR_FTP_NOT_FOUND b/errors/Italian/ERR_FTP_NOT_FOUND index a9b3f04406..82163a587e 100644 --- a/errors/Italian/ERR_FTP_NOT_FOUND +++ b/errors/Italian/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -ERROR: The requested URL could not be retrieved +ERRORE: La URL richiesta non può essere recuperata

        ERROR

        -

        The requested URL could not be retrieved

        +

        La URL richiesta non può essere recuperata


        -The following URL could not be retrieved: +La seguente URL non può essere recuperata: %U

        -Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:

        %f
        -and then received this reply +e ha ricevuto la risposta:
        %F
        %g

        -This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Questo potrebbe essere dovuto ad una URL FTP con un percorso assoluto +(che non è conforme al RFC 1738). In tal caso, si può +recuperare il file in %B.

        diff --git a/errors/Italian/ERR_FTP_UNAVAILABLE b/errors/Italian/ERR_FTP_UNAVAILABLE index d330368f3b..6ca6fa3c8d 100644 --- a/errors/Italian/ERR_FTP_UNAVAILABLE +++ b/errors/Italian/ERR_FTP_UNAVAILABLE @@ -1,16 +1,16 @@ -ERROR: The requested URL could not be retrieved +ERRORE: La URL richiesta non può essere recuperata

        ERROR

        -

        The requested URL could not be retrieved

        +

        La URL richiesta non può essere recuperata


        -The FTP server was too busy while trying to retrieve the URL: +Il server FTP era troppo occupato al momento di recuperare la URL: %U

        -Squid sent the following FTP command: +Squid ha inviato il seguente comando FTP:

        %f
        -and then received this reply +e ha ricevuto la seguente risposta:
        %F
        %g

        diff --git a/errors/Italian/ERR_SHUTTING_DOWN b/errors/Italian/ERR_SHUTTING_DOWN index 3cc9afaa05..3d4b4843ad 100644 --- a/errors/Italian/ERR_SHUTTING_DOWN +++ b/errors/Italian/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved +ERRORE: La URL richiesta non può essere recuperata

        ERROR

        -

        The requested URL could not be retrieved

        +

        La URL richiesta non può essere recuperata


        -While trying to retrieve the URL: +Cercando di recuperare la URL: %U

        -The following error was encountered: +È occorso il seguente errore:

          -This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. +Questa cache è in fase di shutdown e non può +provvedere alla vostra richiesta in questo momento. Si prega +di riprovare più tardi.

          diff --git a/errors/Italian/README b/errors/Italian/README index 1459496602..649ec42af2 100644 --- a/errors/Italian/README +++ b/errors/Italian/README @@ -1,2 +1,2 @@ -Thank you to Alessio Bragadini for -creating these error pages in Italian! +Thank you to Alessio Bragadini and Marco Mesturino + for creating these error pages in Italian! diff --git a/errors/Makefile.in b/errors/Makefile.in index c8b33d8333..52381bb8a3 100644 --- a/errors/Makefile.in +++ b/errors/Makefile.in @@ -1,4 +1,4 @@ -# $Id: Makefile.in,v 1.5 1998/04/24 05:26:39 wessels Exp $ +# $Id: Makefile.in,v 1.6 1999/10/04 05:04:11 wessels Exp $ # prefix = @prefix@ exec_prefix = @exec_prefix@ @@ -45,6 +45,25 @@ install-mkdirs: mkdir $(DEFAULT_ERROR_DIR); \ fi +# undocumented hack. You can use this target to create multi-lingual +# error pages. For example: +# +# make ADDLANG=English addlang +# +# by Andres Kroonmaa +# +addlang: all + -@if test -d $(srcdir)/$(ADDLANG); then \ + cd $(srcdir)/$(ADDLANG)/ ; \ + for f in ERR_*; do \ + if test -f $(DEFAULT_ERROR_DIR)/$$f ; then \ + echo "appending $(ADDLANG)/$$f"; \ + echo "
          " >> $(DEFAULT_ERROR_DIR)/$$f ; \ + cat $$f >> $(DEFAULT_ERROR_DIR)/$$f; \ + fi; \ + done; \ + fi + clean: distclean: clean diff --git a/errors/Slovak/ERR_FTP_FORBIDDEN b/errors/Slovak/ERR_FTP_FORBIDDEN index 5f1b5e9843..8a9f210bb6 100644 --- a/errors/Slovak/ERR_FTP_FORBIDDEN +++ b/errors/Slovak/ERR_FTP_FORBIDDEN @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

          ERROR

          -

          The requested URL could not be retrieved

          +

          CHYBA

          +

          Po¾adovaný dokument je nedostupný


          -An FTP authentication failure occurred -while trying to retrieve the URL: +Pri pokuse získa» URL sa vyskytla chyba autentifikácie: %U

          -Squid sent the following FTP command: +Squid zaslal nasledujúci FTP príkaz:

          %f
          -and then received this reply +a obdr¾al nasledovnú opoveï:
          %F
          %g

          diff --git a/errors/Slovak/ERR_FTP_NOT_FOUND b/errors/Slovak/ERR_FTP_NOT_FOUND index a9b3f04406..3b47dca803 100644 --- a/errors/Slovak/ERR_FTP_NOT_FOUND +++ b/errors/Slovak/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovane URL je nedostupne -

          ERROR

          -

          The requested URL could not be retrieved

          +

          CHYBA

          +

          Po¾adované URL je nedostupné


          -The following URL could not be retrieved: +Nasledovné URL je nedostupné: %U

          -Squid sent the following FTP command: +Squid zaslal nasledujúci FTP príkaz:

          %f
          -and then received this reply +a obdr¾al nasledovnú opoveï:
          %F
          %g

          -This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +To mô¾e by» spôsobené uvedením absolútnej cesty v FTP URL (èo odporuje RFC +1738). V tomto prípade by ste dokument mohli nájs» na %B.

          diff --git a/errors/Slovak/ERR_FTP_UNAVAILABLE b/errors/Slovak/ERR_FTP_UNAVAILABLE index d330368f3b..f99f13146d 100644 --- a/errors/Slovak/ERR_FTP_UNAVAILABLE +++ b/errors/Slovak/ERR_FTP_UNAVAILABLE @@ -1,16 +1,17 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovane URL je nedostupne -

          ERROR

          -

          The requested URL could not be retrieved

          +

          CHYBA

          +

          Po¾adované URL je nedostupné


          -The FTP server was too busy while trying to retrieve the URL: +FTP server bol príli¹ zaneprázdnený pri pokuse získat nasledovné URL: %U

          -Squid sent the following FTP command: +Squid zaslal nasledujúci FTP príkaz:

          %f
          -and then received this reply +a obdr¾al nasledovnú opoveï:
          %F
          %g

          diff --git a/errors/Slovak/ERR_SHUTTING_DOWN b/errors/Slovak/ERR_SHUTTING_DOWN index 3cc9afaa05..0bcf81d3de 100644 --- a/errors/Slovak/ERR_SHUTTING_DOWN +++ b/errors/Slovak/ERR_SHUTTING_DOWN @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved + +CHYBA: Pozadovany dokument je nedostupny -

          ERROR

          -

          The requested URL could not be retrieved

          +

          CHYBA

          +

          Po¾adovaný dokument je nedostupný


          -While trying to retrieve the URL: +Pri pokuse o prístup k: %U

          -The following error was encountered: +do¹lo k nasledovnej chybe:

            -This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. +Táto cache sa práve vypína a nemô¾e spracova» Va¹u po¾iadavku. Opakujte +prosím svoju po¾iadavku za nejaký èas.

            diff --git a/errors/Spanish/ERR_ACCESS_DENIED b/errors/Spanish/ERR_ACCESS_DENIED index f67fd77ca7..c34371b460 100644 --- a/errors/Spanish/ERR_ACCESS_DENIED +++ b/errors/Spanish/ERR_ACCESS_DENIED @@ -15,8 +15,8 @@ Ha ocurrido el siguiente problema: Acceso Denegado.

            -Las reglas de control de acceso impiden que su petición sea +Las reglas de control de acceso impiden que su petición sea permitida en este momento. Contacte con su proveedor de servicios si cree que esto es incorrecto. -

          +
        diff --git a/errors/Spanish/ERR_CACHE_ACCESS_DENIED b/errors/Spanish/ERR_CACHE_ACCESS_DENIED new file mode 100644 index 0000000000..3398098ad5 --- /dev/null +++ b/errors/Spanish/ERR_CACHE_ACCESS_DENIED @@ -0,0 +1,32 @@ + +ERROR: Acceso Denegado al Cache + + +

        ERROR

        +

        Acceso Denegado al Cache

        +
        +

        +Mientras se intentaba traer el URL: +%U +

        +Ha ocurrido el siguiente problema: +

          +
        • + +Acceso denegado al cache. + +
        + +

        +Disculpe, Ud. no está autorizado a acceder a: +

            %U
        +desde este cache hasta que se haya autenticado. + + +

        +Ud. necesita utilizar Netscape version 2.0 o superior, o Microsoft Internet +Explorer 3.0, o un navegador que cumpla con HTTP/1.1 para que funcione. +Por favor contacte al administrador del cache si +tiene dificultad para autenticarse o +cambie su password. +

        diff --git a/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED b/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED new file mode 100644 index 0000000000..dcd0d1d911 --- /dev/null +++ b/errors/Spanish/ERR_CACHE_MGR_ACCESS_DENIED @@ -0,0 +1,31 @@ + +ERROR: Acceso Denegado al Cache Manager + + +

        ERROR

        +

        Acceso Denegado al Cache Manager

        +
        +

        +Mientras se intentaba traer el URL: +%U +

        +Ha ocurrido el siguiente problema: +

          +
        • + +Acceso Denegado al Cache Manager. + +
        + +

        Disculpe, Ud. no está autorizado a acceder a: +

            %U
        +desde este cache hasta que se haya autenticado. + +

        +Ud. necesita utilizar Netscape version 2.0 o superior, o Microsoft Internet +Explorer 3.0, o un navegador que cumpla con HTTP/1.1 para que funcione. +Por favor contacte al administrador del cache si +tiene dificultad para autenticarse, o si Ud. es el administrador, +lea la documentación de Squid sobre interfaz del cache manager y +chequee en el log de cache mensajes de error más detallados. +

        diff --git a/errors/Spanish/ERR_CANNOT_FORWARD b/errors/Spanish/ERR_CANNOT_FORWARD index 3ff3b090d4..e88dfc1029 100644 --- a/errors/Spanish/ERR_CANNOT_FORWARD +++ b/errors/Spanish/ERR_CANNOT_FORWARD @@ -12,18 +12,18 @@ Ha ocurrido el siguiente problema:
        • -Imposibilidad de enviar la petición en este momento. +Imposibilidad de enviar la petición en este momento.

        -La petición no ha podido ser enviada al servidor origen o a alguna -de las cachés padres. Las razones más probables de que haya ocurrido -este error son: +La petición no ha podido ser enviada al servidor origen o a alguna +de las cachés padres. Las razones más probables de que haya +ocurrido este error son:

          -
        • El administrador de la caché no permite que esta - caché realice conexiones directas a los servidores +
        • El administrador de la caché no permite que esta + caché realice conexiones directas a los servidores origen, y -
        • Todas las cachés configuradas como padres son inalcanzables -en este momento. +
        • Todas las cachés configuradas como padres son inalcanzables + en este momento.
        diff --git a/errors/Spanish/ERR_CONNECT_FAIL b/errors/Spanish/ERR_CONNECT_FAIL index 33fa04c1bd..eb1a0abfc6 100644 --- a/errors/Spanish/ERR_CONNECT_FAIL +++ b/errors/Spanish/ERR_CONNECT_FAIL @@ -12,7 +12,7 @@ Ha ocurrido el siguiente problema:
        • -Conexión fallida. +Conexión fallida.
        @@ -22,4 +22,4 @@ El sistema ha devuelto el siguiente mensaje:

        El equipo remoto o la red pueden estar fuera de servicio. -Por favor, intente de nuevo la petición en otro momento. +Por favor, intente de nuevo la petición. diff --git a/errors/Spanish/ERR_DNS_FAIL b/errors/Spanish/ERR_DNS_FAIL index 33668081a0..290127187b 100644 --- a/errors/Spanish/ERR_DNS_FAIL +++ b/errors/Spanish/ERR_DNS_FAIL @@ -9,11 +9,10 @@ Mientras se intentaba traer el URL:

        Ha ocurrido el siguiente problema:

        -Incapaz de determinar la dirección IP a partir +Incapaz de determinar la dirección IP a partir del nombre de la máquina: %H
        -

      El programa dnsserver ha devuelto el siguiente mensaje: @@ -24,9 +23,8 @@ El programa dnsserver ha devuelto el siguiente mensaje:

      Esto significa que:

      - La caché no ha sido capaz de resolver el nombre de máquina
      + La caché no ha sido capaz de resolver el nombre de máquina
        presente en la URL.
      - Compruebe que la dirección sea correcta.
      + Compruebe que la dirección sea correcta.
       
      -

      diff --git a/errors/Spanish/ERR_FORWARDING_DENIED b/errors/Spanish/ERR_FORWARDING_DENIED index 027fe7c882..c689582f1f 100644 --- a/errors/Spanish/ERR_FORWARDING_DENIED +++ b/errors/Spanish/ERR_FORWARDING_DENIED @@ -12,12 +12,12 @@ Ha ocurrido el siguiente problema:
      • -Reenvío denegado. +Reenvío denegado.

      -Esta caché no permite reenviar su petición porque trata de obligar a -mantener una relación de hermandad. -Quizás el cliente en %i es una caché que ha sido mal configurada. +Esta caché no permite reenviar su petición porque trata de +obligar a mantener una relación de hermandad. +Quizás el cliente en %i es una caché que ha sido mal configurada.

      diff --git a/errors/Spanish/ERR_FTP_DISABLED b/errors/Spanish/ERR_FTP_DISABLED index 34e0c3b1fa..12d061c03f 100644 --- a/errors/Spanish/ERR_FTP_DISABLED +++ b/errors/Spanish/ERR_FTP_DISABLED @@ -17,5 +17,5 @@ Servicio FTP deshabilitado

    -Esta caché no proporciona servicio caché para el protocolo FTP. +Esta caché no proporciona servicio caché para el protocolo FTP.

    diff --git a/errors/Spanish/ERR_FTP_FAILURE b/errors/Spanish/ERR_FTP_FAILURE index 5d2fa4ebc6..210a2dfb64 100644 --- a/errors/Spanish/ERR_FTP_FAILURE +++ b/errors/Spanish/ERR_FTP_FAILURE @@ -5,17 +5,13 @@

    El URL solicitado no se ha podido conseguir


    -Ha ocurrido un error de protocolo FTP: - +Ha ocurrido un error de protocolo FTP mientras se intentaba traer el documento con URL: %U

    -Squid envió la siguiente orden FTP: +Squid envió la siguiente orden FTP:

    %f
    -y recibió la siguiente respuesta: +y recibió la siguiente respuesta:
    %F
    %g

    -Esto puede ser causado por una URL de protocolo FTP con una ruta -de directorios absoluto(que no cumple el RFC 1738). Si esta es la -causa, el fichero podría encontrarse en %B. diff --git a/errors/Spanish/ERR_FTP_FORBIDDEN b/errors/Spanish/ERR_FTP_FORBIDDEN index 5f1b5e9843..a7e71c943c 100644 --- a/errors/Spanish/ERR_FTP_FORBIDDEN +++ b/errors/Spanish/ERR_FTP_FORBIDDEN @@ -1,18 +1,18 @@ -ERROR: The requested URL could not be retrieved +ERROR: El URL solicitado no se ha podido conseguir

    ERROR

    -

    The requested URL could not be retrieved

    +

    El URL solicitado no se ha podido conseguir


    -An FTP authentication failure occurred -while trying to retrieve the URL: +Ha ocurrido una falla de autenticación cuando se +trataba de conseguir el URL: %U

    -Squid sent the following FTP command: -

    %f
    -and then received this reply -
    %F
    -
    %g
    -

    +Squid envió el siguiente comando FTP: +
    %f
    +y recibió esta respuesta +
    %F
    +
    %g
    + diff --git a/errors/Spanish/ERR_FTP_NOT_FOUND b/errors/Spanish/ERR_FTP_NOT_FOUND index a9b3f04406..9e73edc423 100644 --- a/errors/Spanish/ERR_FTP_NOT_FOUND +++ b/errors/Spanish/ERR_FTP_NOT_FOUND @@ -1,21 +1,21 @@ -ERROR: The requested URL could not be retrieved +ERROR: El URL solicitado no se ha podido conseguir

    ERROR

    -

    The requested URL could not be retrieved

    +

    El URL solicitado no se ha podido conseguir


    -The following URL could not be retrieved: +El siguiente URL no pudo ser obtenido: %U

    -Squid sent the following FTP command: -

    %f
    -and then received this reply -
    %F
    -
    %g
    +Squid envió el siguiente comando FTP: +
    %f
    +y recibió esta respuesta +
    %F
    +
    %g

-This might be caused by an FTP URL with an absolute path (which does -not comply with RFC 1738). If this is the cause, then the file -can be found at %B. +Esto puede ser causado por un URL FTP con un camino absoluto (y por +lo tanto no cumple con RFC 1738). Si esta es la causa, entonces el +archivo puede ser obtenido en %B.

    diff --git a/errors/Spanish/ERR_FTP_PUT_CREATED b/errors/Spanish/ERR_FTP_PUT_CREATED new file mode 100644 index 0000000000..860151d5f9 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_CREATED @@ -0,0 +1,9 @@ + +FTP PUT Exitoso: Archivo Creado + +

    Operación Exitosa

    +

    Archivo creado

    +
    +

    +

    + diff --git a/errors/Spanish/ERR_FTP_PUT_ERROR b/errors/Spanish/ERR_FTP_PUT_ERROR new file mode 100644 index 0000000000..296b0db5d3 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_ERROR @@ -0,0 +1,24 @@ + +ERROR: falla en envio FTP + +

    ERROR

    +

FTP PUT/envío fallido

    +
    +

    +Mientras se intentaba hacer un PUT del siguiente URL: +%U +

    +Squid envió el siguiente comando FTP: +

    
    +        %f
    +
    +y recibió esta respuesta +
    
    +        %F
    +
    +

    +Esto significa que: +

    +Chequee el camino, los permisos, espacio en disco e intente nuevamente.
    +
    + diff --git a/errors/Spanish/ERR_FTP_PUT_MODIFIED b/errors/Spanish/ERR_FTP_PUT_MODIFIED new file mode 100644 index 0000000000..0d61f644c3 --- /dev/null +++ b/errors/Spanish/ERR_FTP_PUT_MODIFIED @@ -0,0 +1,9 @@ + +FTP PUT Exitoso: Archivo actualizado + +

    Operación Exitosa

    +

    Archivo actualizado

    +
    +

    +

    + diff --git a/errors/Spanish/ERR_FTP_UNAVAILABLE b/errors/Spanish/ERR_FTP_UNAVAILABLE index d330368f3b..70dbfa072e 100644 --- a/errors/Spanish/ERR_FTP_UNAVAILABLE +++ b/errors/Spanish/ERR_FTP_UNAVAILABLE @@ -1,17 +1,17 @@ -ERROR: The requested URL could not be retrieved +ERROR: El URL solicitado no se ha podido conseguir

    ERROR

    -

    The requested URL could not be retrieved

    +

El URL solicitado no se ha podido conseguir


    -The FTP server was too busy while trying to retrieve the URL: +El servidor FTP estaba muy ocupado cuando se intentaba obtener el URL: %U

    -Squid sent the following FTP command: -

    %f
    -and then received this reply -
    %F
    -
    %g
    -

    +Squid envió el siguiente comando FTP: +
    %f
    +y recibió esta respuesta +
    %F
    +
    %g
    + diff --git a/errors/Spanish/ERR_INVALID_REQ b/errors/Spanish/ERR_INVALID_REQ index 7f3974985c..4d801c2c68 100644 --- a/errors/Spanish/ERR_INVALID_REQ +++ b/errors/Spanish/ERR_INVALID_REQ @@ -14,19 +14,21 @@ Ha ocurrido el siguiente problema:
    • -Petición no válida. +Petición no válida.

    -Algún aspecto de la petición HTTP no es válido. Posibles problemas: +Algún aspecto de la petición HTTP no es válido. +Posibles problemas:

      -
    • Falta o es desconocido el método de la petición (no es GET ni POST) +
    • Falta o es desconocido el método de la petición (no es + GET ni POST)
    • Falta el URL
    • Falta el identificador HTTP (HTTP/1.0) -
    • La petición es demasiado grande. -
    • Hay caracteres ilegales en el nombre de máquina; - el carácter subrayado (_) no está permitido. +
    • La petición es demasiado grande. +
    • Hay caracteres ilegales en el nombre de máquina; + el carácter subrayado (_) no está permitido.
    -

    + diff --git a/errors/Spanish/ERR_INVALID_URL b/errors/Spanish/ERR_INVALID_URL index dd2655dc2a..5a425cf298 100644 --- a/errors/Spanish/ERR_INVALID_URL +++ b/errors/Spanish/ERR_INVALID_URL @@ -17,12 +17,12 @@ URL incorrecta.

    -Algún aspecto del URL solicitado es incorrecto. Posibles problemas: +Algún aspecto del URL solicitado es incorrecto. Posibles problemas:

      -
    • Falta o es incorrecto el protocolo de acceso (debe ser `http://'' o similar) -
    • Falta el nombre de la máquina -
    • Hay un doble-escape ilegas en la ruta de la URL -
    • Hay caracteres ilegales en el nombre de máquina; - el caracter de subrayado no está permitido +
    • Falta o es incorrecto el protocolo de acceso (debe ser ``http://'' o + similar) +
    • Falta el nombre de la máquina +
    • Hay un doble-escape ilegal en la ruta de la URL +
    • Hay caracteres ilegales en el nombre de máquina; + el caracter de subrayado (_) no está permitido
    -

    diff --git a/errors/Spanish/ERR_LIFETIME_EXP b/errors/Spanish/ERR_LIFETIME_EXP index 4507fcead9..c8e39d2a4d 100644 --- a/errors/Spanish/ERR_LIFETIME_EXP +++ b/errors/Spanish/ERR_LIFETIME_EXP @@ -12,12 +12,12 @@ Ha ocurrido el siguiente problema:
    • -Ha expirado el tiempo de vida de la conexión +Ha expirado el tiempo de vida de la conexión

    -Squid ha dado por terminada la petición porque se ha excedido -el tiempo de vida máximo para una conexión. +Squid ha dado por terminada la petición porque se ha excedido +el tiempo de vida máximo para una conexión.

    diff --git a/errors/Spanish/ERR_NO_RELAY b/errors/Spanish/ERR_NO_RELAY index da980c8ce3..f12c124bd7 100644 --- a/errors/Spanish/ERR_NO_RELAY +++ b/errors/Spanish/ERR_NO_RELAY @@ -17,5 +17,5 @@ No hay una pasarela para protocolo Wais.

    -Esta caché no tiene definido ninguna pasarela para el protocolo WAIS !! -¡Gríteselo al administrador de la caché ! +Esta caché no tiene definido ninguna pasarela para el protocolo WAIS !! +¡Gríteselo al administrador de la caché ! diff --git a/errors/Spanish/ERR_ONLY_IF_CACHED_MISS b/errors/Spanish/ERR_ONLY_IF_CACHED_MISS new file mode 100644 index 0000000000..3f7500f909 --- /dev/null +++ b/errors/Spanish/ERR_ONLY_IF_CACHED_MISS @@ -0,0 +1,27 @@ + +ERROR: El URL solicitado no se ha podido conseguir + +

    ERROR

    +

    El URL solicitado no se ha podido conseguir

    +
    +

    +Mientras se intentaba traer el URL: +%U +

    +Ha ocurrido el siguiente problema: +

      +
• + +No se encontró un documento válido en la caché y se +especificó la directiva only-if-cached. + +
    + +

    + +Ud. ha enviado una solicitud con la directiva de control de la caché +only-if-cached. El documento no fue encontrado en la caché, +o requiere revalidación prohibida por la directiva +only-if-cached. + +

    diff --git a/errors/Spanish/ERR_READ_ERROR b/errors/Spanish/ERR_READ_ERROR index 3a0d58840c..535530703c 100644 --- a/errors/Spanish/ERR_READ_ERROR +++ b/errors/Spanish/ERR_READ_ERROR @@ -19,9 +19,8 @@ Error de lectura

    El sistema ha devuelto el siguiente mensaje:

        %E
    -

    -Ha ocurrido algún problema mientras se leían datos de la red. -Por favor, inténtelo de nuevo. +Ha ocurrido algún problema mientras se leían datos de la red. +Por favor, inténtelo de nuevo.

    diff --git a/errors/Spanish/ERR_READ_TIMEOUT b/errors/Spanish/ERR_READ_TIMEOUT index d1358d6514..a666a31be8 100644 --- a/errors/Spanish/ERR_READ_TIMEOUT +++ b/errors/Spanish/ERR_READ_TIMEOUT @@ -12,7 +12,7 @@ Ha ocurrido el siguiente problema:
    • -Se acabó el tiempo máximo para lectura de datos de la red. +Se acabó el tiempo máximo para lectura de datos de la red.
    @@ -21,6 +21,7 @@ El sistema ha devuelto el siguiente mensaje:
        %E

    -Se acabó el tiempo máximo de lectura mientras se leían datos de la red. -La red o el servidor pueden estar congestionados. Por favor, inténtelo de nuevo +Se acabó el tiempo máximo de lectura mientras se leían +datos de la red. La red o el servidor pueden estar congestionados. Por +favor, inténtelo de nuevo

    diff --git a/errors/Spanish/ERR_SHUTTING_DOWN b/errors/Spanish/ERR_SHUTTING_DOWN index 3cc9afaa05..63de81a0ac 100644 --- a/errors/Spanish/ERR_SHUTTING_DOWN +++ b/errors/Spanish/ERR_SHUTTING_DOWN @@ -1,17 +1,18 @@ -ERROR: The requested URL could not be retrieved +ERROR: El URL solicitado no se ha podido conseguir

    ERROR

    -

    The requested URL could not be retrieved

    +

    El URL solicitado no se ha podido conseguir


    -While trying to retrieve the URL: +Mientras se intentaba traer el URL: %U

    -The following error was encountered: +Ha ocurrido el siguiente problema:

      -

      -This cache is in the process of shutting down and can not -service your request at this time. Please retry your -request again soon. -

      + +Esta caché esta siendo desactivado y no puede atender su +solicitud en este momento. Por favor reintente su solicitud +nuevamente más tarde. + +
    diff --git a/errors/Spanish/ERR_SOCKET_FAILURE b/errors/Spanish/ERR_SOCKET_FAILURE index 7596fa54ed..ce79323f72 100644 --- a/errors/Spanish/ERR_SOCKET_FAILURE +++ b/errors/Spanish/ERR_SOCKET_FAILURE @@ -22,4 +22,4 @@ El sistema ha devuelto el siguiente mensaje:

    Squid ha sido incapaz de crear un puerto TCP, posiblemente debido -al exceso de carga. Inténtelo de nuevo, por favor. +al exceso de carga. Inténtelo de nuevo, por favor. diff --git a/errors/Spanish/ERR_UNSUP_REQ b/errors/Spanish/ERR_UNSUP_REQ index db599d0ca5..b768b70fe5 100644 --- a/errors/Spanish/ERR_UNSUP_REQ +++ b/errors/Spanish/ERR_UNSUP_REQ @@ -12,10 +12,10 @@ Ha ocurrido el siguiente problema:

    • -Método de la petición y protocolo no soportados +Método de la petición y protocolo no soportados

    -Squid no admite todos los métodos para todos los protocolos de acceso. +Squid no admite todos los métodos para todos los protocolos de acceso. Por ejemplo, no se puede hacer un POST a un servidor Gopher. diff --git a/errors/Spanish/ERR_URN_RESOLVE b/errors/Spanish/ERR_URN_RESOLVE index 1a12458606..d01c90dcca 100644 --- a/errors/Spanish/ERR_URN_RESOLVE +++ b/errors/Spanish/ERR_URN_RESOLVE @@ -17,5 +17,5 @@ No se puede resolver el URN

    -Hey, no espere mucho sobre URNs en %T :) +Hey, no espere mucho de URNs en %T :)

    diff --git a/errors/Spanish/ERR_WRITE_ERROR b/errors/Spanish/ERR_WRITE_ERROR index 712dbbaa77..dcda5151e0 100644 --- a/errors/Spanish/ERR_WRITE_ERROR +++ b/errors/Spanish/ERR_WRITE_ERROR @@ -21,5 +21,5 @@ El sistema ha devuelto el siguiente mensaje:
        %E

    -Se ha producido un error mientras se escribían datos en la red. -Por favor, inténtelo de nuevo. +Se ha producido un error mientras se escribían datos en la red. +Por favor, inténtelo de nuevo. diff --git a/errors/Spanish/ERR_ZERO_SIZE_OBJECT b/errors/Spanish/ERR_ZERO_SIZE_OBJECT index 6256c92683..a5535ffe54 100644 --- a/errors/Spanish/ERR_ZERO_SIZE_OBJECT +++ b/errors/Spanish/ERR_ZERO_SIZE_OBJECT @@ -12,10 +12,11 @@ Ha ocurrido el siguiente problema:

    • -Respuesta vacía (tamaño cero) +Respuesta vacía (tamaño cero)

    -Squid no ha recibido ninguna información en respuesta a esta petición. +Squid no ha recibido ninguna información en respuesta a esta +petición.

    diff --git a/errors/Spanish/README b/errors/Spanish/README index 051b3f0299..163e8d56e4 100644 --- a/errors/Spanish/README +++ b/errors/Spanish/README @@ -1,5 +1,3 @@ -Thanks to Javier Puche , -and Roberto Lumbreras +Thanks to Javier Puche , Roberto Lumbreras +, and Juan Nicolas Ruiz N. for creating these error pages in Spanish! - -Note: character set is ISO-LATIN-1 diff --git a/include/autoconf.h.in b/include/autoconf.h.in index 8ab6bfa692..536c9cef7c 100644 --- a/include/autoconf.h.in +++ b/include/autoconf.h.in @@ -70,6 +70,9 @@ /* Define to use async disk I/O operations */ #undef USE_ASYNC_IO +/* Defines how many threads to use for async I/O */ +#undef NUMTHREADS + /* * If you want to use Squid's ICMP features (highly recommended!) then * define this. When USE_ICMP is defined, Squid will send ICMP pings @@ -267,6 +270,11 @@ */ #undef USE_TRUNCATE +/* + * Allow underscores in host names + */ +#undef ALLOW_HOSTNAME_UNDERSCORES + /* The number of bytes in a int. */ #undef SIZEOF_INT diff --git a/include/heap.h b/include/heap.h index 2d9243740f..e7b4e5e123 100644 --- a/include/heap.h +++ b/include/heap.h @@ -1,5 +1,5 @@ /* - * $Id: heap.h,v 1.1 1999/06/24 20:17:03 wessels Exp $ + * $Id: heap.h,v 1.2 1999/10/04 05:04:48 wessels Exp $ * * AUTHOR: John Dilley, Hewlett Packard * @@ -28,6 +28,8 @@ */ /**************************************************************************** + * Copyright (C) 1999 by Hewlett Packard + * * Heap data structure. Used to store objects for cache replacement. The * heap is implemented as a contiguous array in memory. Heap sort and heap * update are done in-place. The heap is ordered with the smallest value at diff --git a/include/splay.h b/include/splay.h index 5342997a9c..4f885a00c0 100644 --- a/include/splay.h +++ b/include/splay.h @@ -1,5 +1,5 @@ /* - * $Id: splay.h,v 1.7 1998/09/23 17:20:05 wessels Exp $ + * $Id: splay.h,v 1.8 1999/10/04 05:04:49 wessels Exp $ */ @@ -9,8 +9,8 @@ typedef struct _splay_node { struct _splay_node *right; } splayNode; -typedef int SPLAYCMP(const void *, splayNode *); -typedef void SPLAYWALKEE(void *, void *); +typedef int SPLAYCMP(const void *a, const void *b); +typedef void SPLAYWALKEE(void *nodedata, void *state); typedef void SPLAYFREE(void *); extern int splayLastResult; diff --git a/include/util.h b/include/util.h index 5676864a78..fd7f43eab2 100644 --- a/include/util.h +++ b/include/util.h @@ -1,5 +1,5 @@ /* - * $Id: util.h,v 1.53 1999/01/29 19:39:09 wessels Exp $ + * $Id: util.h,v 1.54 1999/10/04 05:04:49 wessels Exp $ * * AUTHOR: Harvest Derived * @@ -80,6 +80,8 @@ extern void xxfree(void *); /* rfc1738.c */ extern char *rfc1738_escape(const char *); +extern char *rfc1738_escape_unescaped(const char *); +extern char *rfc1738_escape_part(const char *); extern void rfc1738_unescape(char *); #if XMALLOC_STATISTICS diff --git a/lib/heap.c b/lib/heap.c index b1dd42797f..199c942b39 100644 --- a/lib/heap.c +++ b/lib/heap.c @@ -1,6 +1,6 @@ /* - * $Id: heap.c,v 1.3 1999/07/05 21:27:07 wessels Exp $ + * $Id: heap.c,v 1.4 1999/10/04 05:04:50 wessels Exp $ * * AUTHOR: John Dilley, Hewlett Packard * @@ -34,6 +34,7 @@ /**************************************************************************** * Heap implementation + * Copyright (C) 1999 by Hewlett Packard ****************************************************************************/ #include "config.h" @@ -97,11 +98,11 @@ new_heap(int initSize, heap_key_func gen_key) heap *hp = malloc(sizeof(*hp)); assert(hp != NULL); + if (initSize <= 0) + initSize = MinSize; hp->nodes = 
calloc(initSize, sizeof(heap_node *)); assert(hp->nodes != NULL); - if (initSize <= 0) - initSize = MinSize; hp->size = initSize; hp->last = 0; hp->gen_key = gen_key; diff --git a/lib/rfc1035.c b/lib/rfc1035.c index fb333f9baa..fa69094497 100644 --- a/lib/rfc1035.c +++ b/lib/rfc1035.c @@ -1,6 +1,6 @@ /* - * $Id: rfc1035.c,v 1.9 1999/05/04 21:20:40 wessels Exp $ + * $Id: rfc1035.c,v 1.10 1999/10/04 05:04:51 wessels Exp $ * * Low level DNS protocol routines * AUTHOR: Duane Wessels @@ -414,7 +414,7 @@ rfc1035AnswersUnpack(const char *buf, /* skip question */ while (i--) { do { - l = (int) *(buf + off); + l = (int) (unsigned char) *(buf + off); off++; if (l > RFC1035_MAXLABELSZ) { /* compression */ off++; @@ -422,9 +422,16 @@ rfc1035AnswersUnpack(const char *buf, } else { off += l; } - } while (l > 0); + } while (l > 0); /* a zero-length label terminates */ off += 4; /* qtype, qclass */ - assert(off <= sz); + if (off > sz) { + /* + * This used be an assertion and it triggered once, but the + * core file was useless for debugging. Sigh, I guess we + * need a debug_hook. + */ + return 0; + } } i = (int) hdr.ancount; if (i == 0) diff --git a/lib/rfc1738.c b/lib/rfc1738.c index becc651a5c..fef98cbcfb 100644 --- a/lib/rfc1738.c +++ b/lib/rfc1738.c @@ -1,5 +1,5 @@ /* - * $Id: rfc1738.c,v 1.19 1999/05/04 21:20:40 wessels Exp $ + * $Id: rfc1738.c,v 1.20 1999/10/04 05:04:51 wessels Exp $ * * DEBUG: * AUTHOR: Harvest Derived @@ -54,7 +54,9 @@ static char rfc1738_unsafe_chars[] = (char) 0x3E, /* > */ (char) 0x22, /* " */ (char) 0x23, /* # */ +#if 0 /* done in code */ (char) 0x25, /* % */ +#endif (char) 0x7B, /* { */ (char) 0x7D, /* } */ (char) 0x7C, /* | */ @@ -68,12 +70,23 @@ static char rfc1738_unsafe_chars[] = (char) 0x20 /* space */ }; +static char rfc1738_reserved_chars[] = +{ + (char) 0x3b, /* ; */ + (char) 0x2f, /* / */ + (char) 0x3f, /* ? */ + (char) 0x3a, /* : */ + (char) 0x40, /* @ */ + (char) 0x3d, /* = */ + (char) 0x26 /* & */ +}; + /* * rfc1738_escape - Returns a static buffer contains the RFC 1738 * compliant, escaped version of the given url. */ -char * -rfc1738_escape(const char *url) +static char * +rfc1738_do_escape(const char *url, int encode_reserved) { static char *buf; static size_t bufsize = 0; @@ -96,6 +109,16 @@ rfc1738_escape(const char *url) break; } } + /* Handle % separately */ + if (encode_reserved >= 0 && *p == '%') + do_escape = 1; + /* RFC 1738 defines these chars as reserved */ + for (i = 0; i < sizeof(rfc1738_reserved_chars) && encode_reserved > 0; i++) { + if (*p == rfc1738_reserved_chars[i]) { + do_escape = 1; + break; + } + } /* RFC 1738 says any control chars (0x00-0x1F) are encoded */ if ((unsigned char) *p <= (unsigned char) 0x1F) { do_escape = 1; @@ -124,6 +147,36 @@ rfc1738_escape(const char *url) return (buf); } +/* + * rfc1738_escape - Returns a static buffer that contains the RFC + * 1738 compliant, escaped version of the given url. + */ +char * +rfc1738_escape(const char *url) +{ + return rfc1738_do_escape(url, 0); +} + +/* + * rfc1738_escape_unescaped - Returns a static buffer that contains + * the RFC 1738 compliant, escaped version of the given url. + */ +char * +rfc1738_escape_unescaped(const char *url) +{ + return rfc1738_do_escape(url, -1); +} + +/* + * rfc1738_escape_part - Returns a static buffer that contains the + * RFC 1738 compliant, escaped version of the given url segment. 
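+ * Judging from rfc1738_do_escape() above, the second argument selects how
+ * much gets encoded: rfc1738_escape_part() passes 1, so the RFC 1738
+ * reserved characters ';' '/' '?' ':' '@' '=' '&' and '%' are escaped in
+ * addition to the unsafe set; rfc1738_escape() passes 0 and leaves the
+ * reserved set alone (but still escapes '%'); rfc1738_escape_unescaped()
+ * passes -1, so existing %xx escapes are preserved.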
+ */ +char * +rfc1738_escape_part(const char *url) +{ + return rfc1738_do_escape(url, 1); +} + /* * rfc1738_unescape() - Converts escaped characters (%xy numbers) in * given the string. %% is a %. %ab is the 8-bit hexadecimal number "ab" diff --git a/lib/splay.c b/lib/splay.c index 78a207b2f8..04ad32e6c7 100644 --- a/lib/splay.c +++ b/lib/splay.c @@ -1,5 +1,5 @@ /* - * $Id: splay.c,v 1.11 1999/05/04 21:20:42 wessels Exp $ + * $Id: splay.c,v 1.12 1999/10/04 05:04:52 wessels Exp $ */ #include "config.h" @@ -59,11 +59,11 @@ splay_splay(const void *data, splayNode * top, SPLAYCMP * compare) l = r = &N; for (;;) { - splayLastResult = compare(data, top); + splayLastResult = compare(data, top->data); if (splayLastResult < 0) { if (top->left == NULL) break; - if ((splayLastResult = compare(data, top->left)) < 0) { + if ((splayLastResult = compare(data, top->left->data)) < 0) { y = top->left; /* rotate right */ top->left = y->right; y->right = top; @@ -77,7 +77,7 @@ splay_splay(const void *data, splayNode * top, SPLAYCMP * compare) } else if (splayLastResult > 0) { if (top->right == NULL) break; - if ((splayLastResult = compare(data, top->right)) > 0) { + if ((splayLastResult = compare(data, top->right->data)) > 0) { y = top->right; /* rotate left */ top->right = y->left; y->left = top; @@ -115,25 +115,39 @@ splay_walk(splayNode * top, SPLAYWALKEE * walkee, void *state) { if (top->left) splay_walk(top->left, walkee, state); + walkee(top->data, state); if (top->right) splay_walk(top->right, walkee, state); - walkee(top->data, state); } +#ifdef DEBUG +void +splay_dump_entry(void *data, int depth) +{ + printf("%*s%s\n", depth, "", (char *) data); +} - -#ifdef DRIVER +static void +splay_do_dump(splayNode * top, void printfunc(void *data, int depth), int depth) +{ + if (!top) + return; + splay_do_dump(top->left, printfunc, depth + 1); + printfunc(top->data, depth); + splay_do_dump(top->right, printfunc, depth + 1); +} void -splay_print(splayNode * top, void (*printfunc) ()) +splay_dump(splayNode * top, void printfunc(void *data, int depth)) { - if (top == NULL) - return; - splay_print(top->left, printfunc); - printfunc(top->data); - splay_print(top->right, printfunc); + splay_do_dump(top, printfunc, 0); } + +#endif + +#ifdef DRIVER + typedef struct { int i; } intnode; @@ -147,10 +161,10 @@ compareint(void *a, splayNode * n) } void -printint(void *a) +printint(void *a, void *state) { intnode *A = a; - printf("%d\n", A->i); + printf("%d\n", "", A->i); } main(int argc, char *argv[]) @@ -164,7 +178,7 @@ main(int argc, char *argv[]) I->i = random(); top = splay_insert(I, top, compareint); } - splay_print(top, printint); + splay_walk(top, printint, NULL); return 0; } #endif /* DRIVER */ diff --git a/scripts/access-log-matrix.pl b/scripts/access-log-matrix.pl index a38229687f..1d66b276aa 100755 --- a/scripts/access-log-matrix.pl +++ b/scripts/access-log-matrix.pl @@ -47,9 +47,9 @@ while (<>) { print ' HOSTNAME: '. 
`hostname`; ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdat) = localtime($first); -printf "FIRST LOG ENTRY: %s/%s/%s %.2d:%.2d:%.2d\n", $year,$mon+1,$mday, $hour,$min,$sec; +printf "FIRST LOG ENTRY: %04d/%02d/%02d %.2d:%.2d:%.2d\n", $year+1900,$mon+1,$mday, $hour,$min,$sec; ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdat) = localtime($last); -printf " LAST LOG ENTRY: %s/%s/%s %.2d:%.2d:%.2d\n", $year,$mon+1,$mday, $hour,$min,$sec; +printf " LAST LOG ENTRY: %04d/%02d/%02d %.2d:%.2d:%.2d\n", $year+1900,$mon+1,$mday, $hour,$min,$sec; print "\n"; printf ("%25.25s %5s %5s %5s %5s %5s %5s %5s %5s\n", diff --git a/src/HttpHdrCc.cc b/src/HttpHdrCc.cc index db16a0aed6..68d7e33cc3 100644 --- a/src/HttpHdrCc.cc +++ b/src/HttpHdrCc.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrCc.cc,v 1.17 1999/05/31 05:03:35 rousskov Exp $ + * $Id: HttpHdrCc.cc,v 1.18 1999/10/04 05:04:53 wessels Exp $ * * DEBUG: section 65 HTTP Cache Control Header * AUTHOR: Alex Rousskov @@ -58,13 +58,13 @@ static int httpHdrCcParseInit(HttpHdrCc * cc, const String * str); /* module initialization */ void -httpHdrCcInitModule() +httpHdrCcInitModule(void) { CcFieldsInfo = httpHeaderBuildFieldsInfo(CcAttrs, CC_ENUM_END); } void -httpHdrCcCleanModule() +httpHdrCcCleanModule(void) { httpHeaderDestroyFieldsInfo(CcFieldsInfo, CC_ENUM_END); CcFieldsInfo = NULL; @@ -73,7 +73,7 @@ httpHdrCcCleanModule() /* implementation */ HttpHdrCc * -httpHdrCcCreate() +httpHdrCcCreate(void) { HttpHdrCc *cc = memAllocate(MEM_HTTP_HDR_CC); cc->max_age = -1; diff --git a/src/HttpHdrContRange.cc b/src/HttpHdrContRange.cc index b9b41b8682..4a0b1ef539 100644 --- a/src/HttpHdrContRange.cc +++ b/src/HttpHdrContRange.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrContRange.cc,v 1.10 1998/12/05 00:54:09 wessels Exp $ + * $Id: HttpHdrContRange.cc,v 1.11 1999/10/04 05:04:54 wessels Exp $ * * DEBUG: section 68 HTTP Content-Range Header * AUTHOR: Alex Rousskov @@ -81,7 +81,7 @@ httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec * spec, const char *field, int fl p++; /* do we have last-pos ? 
*/ if (p - field < flen) { - size_t last_pos; + ssize_t last_pos; if (!httpHeaderParseSize(p, &last_pos)) return 0; spec->length = size_diff(last_pos + 1, spec->offset); @@ -110,7 +110,7 @@ httpHdrRangeRespSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p) */ HttpHdrContRange * -httpHdrContRangeCreate() +httpHdrContRangeCreate(void) { HttpHdrContRange *r = memAllocate(MEM_HTTP_HDR_CONTENT_RANGE); r->spec.offset = r->spec.length = range_spec_unknown; @@ -187,7 +187,7 @@ httpHdrContRangePackInto(const HttpHdrContRange * range, Packer * p) } void -httpHdrContRangeSet(HttpHdrContRange * cr, HttpHdrRangeSpec spec, size_t ent_len) +httpHdrContRangeSet(HttpHdrContRange * cr, HttpHdrRangeSpec spec, ssize_t ent_len) { assert(cr && ent_len >= 0); cr->spec = spec; diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc index bac4e6e50d..7c173230db 100644 --- a/src/HttpHdrRange.cc +++ b/src/HttpHdrRange.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrRange.cc,v 1.19 1999/01/19 23:16:48 wessels Exp $ + * $Id: HttpHdrRange.cc,v 1.20 1999/10/04 05:04:54 wessels Exp $ * * DEBUG: section 64 HTTP Range Header * AUTHOR: Alex Rousskov @@ -55,7 +55,7 @@ /* local constants */ -#define range_spec_unknown ((size_t)-1) +#define range_spec_unknown ((ssize_t)-1) /* local routines */ #define known_spec(s) ((s) != range_spec_unknown) @@ -73,7 +73,7 @@ static int RangeParsedCount = 0; */ static HttpHdrRangeSpec * -httpHdrRangeSpecCreate() +httpHdrRangeSpecCreate(void) { return memAllocate(MEM_HTTP_HDR_RANGE_SPEC); } @@ -102,7 +102,7 @@ httpHdrRangeSpecParseCreate(const char *field, int flen) p++; /* do we have last-pos ? */ if (p - field < flen) { - size_t last_pos; + ssize_t last_pos; if (!httpHeaderParseSize(p, &last_pos)) return NULL; spec.length = size_diff(last_pos + 1, spec.offset); @@ -209,7 +209,7 @@ httpHdrRangeSpecMergeWith(HttpHdrRangeSpec * recep, const HttpHdrRangeSpec * don */ HttpHdrRange * -httpHdrRangeCreate() +httpHdrRangeCreate(void) { HttpHdrRange *r = memAllocate(MEM_HTTP_HDR_RANGE); stackInit(&r->specs); @@ -303,7 +303,7 @@ httpHdrRangePackInto(const HttpHdrRange * range, Packer * p) * - there is at least one range spec */ int -httpHdrRangeCanonize(HttpHdrRange * range, size_t clen) +httpHdrRangeCanonize(HttpHdrRange * range, ssize_t clen) { int i; HttpHdrRangeSpec *spec; @@ -386,8 +386,10 @@ httpHdrRangeIsComplex(const HttpHdrRange * range) return 0; } -/* hack: returns true if range specs may be too "complex" when "canonized" */ -/* see also: httpHdrRangeIsComplex */ +/* + * hack: returns true if range specs may be too "complex" when "canonized". + * see also: httpHdrRangeIsComplex. + */ int httpHdrRangeWillBeComplex(const HttpHdrRange * range) { @@ -409,12 +411,14 @@ httpHdrRangeWillBeComplex(const HttpHdrRange * range) return 0; } -/* Returns lowest known offset in range spec(s), or range_spec_unknown */ -/* this is used for size limiting */ -size_t +/* + * Returns lowest known offset in range spec(s), or range_spec_unknown + * this is used for size limiting + */ +ssize_t httpHdrRangeFirstOffset(const HttpHdrRange * range) { - size_t offset = range_spec_unknown; + ssize_t offset = range_spec_unknown; HttpHdrRangePos pos = HttpHdrRangeInitPos; const HttpHdrRangeSpec *spec; assert(range); @@ -425,16 +429,17 @@ httpHdrRangeFirstOffset(const HttpHdrRange * range) return offset; } -/* Returns lowest offset in range spec(s), 0 if unknown */ -/* This is used for finding out where we need to start if all +/* + * Returns lowest offset in range spec(s), 0 if unknown. 
+ * This is used for finding out where we need to start if all * ranges are combined into one, for example FTP REST. * Use 0 for size if unknown */ -size_t -httpHdrRangeLowestOffset(const HttpHdrRange * range, size_t size) +ssize_t +httpHdrRangeLowestOffset(const HttpHdrRange * range, ssize_t size) { - size_t offset = range_spec_unknown; - size_t current; + ssize_t offset = range_spec_unknown; + ssize_t current; HttpHdrRangePos pos = HttpHdrRangeInitPos; const HttpHdrRangeSpec *spec; assert(range); diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc index 832fbc9027..16cd9d0223 100644 --- a/src/HttpHeader.cc +++ b/src/HttpHeader.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHeader.cc,v 1.63 1999/05/04 19:43:17 wessels Exp $ + * $Id: HttpHeader.cc,v 1.64 1999/10/04 05:04:55 wessels Exp $ * * DEBUG: section 55 HTTP Header * AUTHOR: Alex Rousskov @@ -141,6 +141,7 @@ static http_hdr_type ListHeadersArr[] = HDR_CONNECTION, HDR_IF_MATCH, HDR_IF_NONE_MATCH, HDR_LINK, HDR_PRAGMA, + HDR_PROXY_CONNECTION, /* HDR_TRANSFER_ENCODING, */ HDR_UPGRADE, HDR_VARY, @@ -227,7 +228,7 @@ static void httpHeaderStatDump(const HttpHeaderStat * hs, StoreEntry * e); */ void -httpHeaderInitModule() +httpHeaderInitModule(void) { int i; /* check that we have enough space for masks */ @@ -264,7 +265,7 @@ httpHeaderInitModule() } void -httpHeaderCleanModule() +httpHeaderCleanModule(void) { httpHeaderDestroyFieldsInfo(Headers, HDR_ENUM_END); Headers = NULL; diff --git a/src/HttpHeaderTools.cc b/src/HttpHeaderTools.cc index 310e6592ad..926f50746e 100644 --- a/src/HttpHeaderTools.cc +++ b/src/HttpHeaderTools.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHeaderTools.cc,v 1.25 1999/04/15 06:15:41 wessels Exp $ + * $Id: HttpHeaderTools.cc,v 1.26 1999/10/04 05:04:56 wessels Exp $ * * DEBUG: section 66 HTTP Header Tools * AUTHOR: Alex Rousskov @@ -97,17 +97,18 @@ httpHeaderCalcMask(HttpHeaderMask * mask, const int *enums, int count) } /* same as httpHeaderPutStr, but formats the string using snprintf first */ -#if STDC_HEADERS void +#if STDC_HEADERS httpHeaderPutStrf(HttpHeader * hdr, http_hdr_type id, const char *fmt,...) -{ - va_list args; - va_start(args, fmt); #else -void httpHeaderPutStrf(va_alist) va_dcl +#endif { +#if STDC_HEADERS + va_list args; + va_start(args, fmt); +#else va_list args; HttpHeader *hdr = NULL; http_hdr_type id = HDR_ENUM_END; @@ -135,7 +136,7 @@ httpHeaderPutStrvf(HttpHeader * hdr, http_hdr_type id, const char *fmt, va_list /* wrapper arrounf PutContRange */ void -httpHeaderAddContRange(HttpHeader * hdr, HttpHdrRangeSpec spec, size_t ent_len) +httpHeaderAddContRange(HttpHeader * hdr, HttpHdrRangeSpec spec, ssize_t ent_len) { HttpHdrContRange *cr = httpHdrContRangeCreate(); assert(hdr && ent_len >= 0); @@ -146,23 +147,28 @@ httpHeaderAddContRange(HttpHeader * hdr, HttpHdrRangeSpec spec, size_t ent_len) /* - * return true if a given directive is found in at least one of the "connection" header-fields - * note: if HDR_PROXY_CONNECTION is present we ignore HDR_CONNECTION + * return true if a given directive is found in at least one of + * the "connection" header-fields note: if HDR_PROXY_CONNECTION is + * present we ignore HDR_CONNECTION. 
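+ * With this rewrite both Connection and Proxy-Connection are fetched as
+ * comma-separated lists and searched with strListIsMember(), so a value
+ * such as "keep-alive, close" is matched element by element instead of
+ * by a single whole-string comparison as before.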
*/ int httpHeaderHasConnDir(const HttpHeader * hdr, const char *directive) { - if (httpHeaderHas(hdr, HDR_PROXY_CONNECTION)) { - const char *str = httpHeaderGetStr(hdr, HDR_PROXY_CONNECTION); - return str && !strcasecmp(str, directive); - } - if (httpHeaderHas(hdr, HDR_CONNECTION)) { - String str = httpHeaderGetList(hdr, HDR_CONNECTION); - const int res = strListIsMember(&str, directive, ','); - stringClean(&str); - return res; - } - return 0; + String list; + http_hdr_type ht; + int res; + /* what type of header do we have? */ + if (httpHeaderHas(hdr, HDR_PROXY_CONNECTION)) + ht = HDR_PROXY_CONNECTION; + else if (httpHeaderHas(hdr, HDR_CONNECTION)) + ht = HDR_CONNECTION; + else + return 0; + + list = httpHeaderGetList(hdr, ht); + res = strListIsMember(&list, directive, ','); + stringClean(&list); + return res; } /* returns true iff "m" is a member of the list */ @@ -171,9 +177,12 @@ strListIsMember(const String * list, const char *m, char del) { const char *pos = NULL; const char *item; + int ilen = 0; + int mlen; assert(list && m); - while (strListGetItem(list, del, &item, NULL, &pos)) { - if (!strcasecmp(item, m)) + mlen = strlen(m); + while (strListGetItem(list, del, &item, &ilen, &pos)) { + if (mlen == ilen && !strncasecmp(item, m, ilen)) return 1; } return 0; @@ -183,6 +192,18 @@ strListIsMember(const String * list, const char *m, char del) int strListIsSubstr(const String * list, const char *s, char del) { + assert(list && del); + return strStr(*list, s) != 0; + + /* + * Note: the original code with a loop is broken because it uses strstr() + * instead of strnstr(). If 's' contains a 'del', strListIsSubstr() may + * return true when it should not. If 's' does not contain a 'del', the + * implementaion is equavalent to strstr()! Thus, we replace the loop with + * strstr() above until strnstr() is available. 
+ */ + +#ifdef BROKEN_CODE const char *pos = NULL; const char *item; assert(list && s); @@ -191,6 +212,7 @@ strListIsSubstr(const String * list, const char *s, char del) return 1; } return 0; +#endif } /* appends an item to the list */ @@ -276,7 +298,7 @@ httpHeaderParseInt(const char *start, int *value) } int -httpHeaderParseSize(const char *start, size_t * value) +httpHeaderParseSize(const char *start, ssize_t * value) { int v; const int res = httpHeaderParseInt(start, &v); @@ -366,5 +388,6 @@ httpHeaderStrCmp(const char *h1, const char *h2, int len) if (c2) len2++; } + /* NOTREACHED */ return 0; } diff --git a/src/HttpReply.cc b/src/HttpReply.cc index 4d286cc0e1..3fd1adb101 100644 --- a/src/HttpReply.cc +++ b/src/HttpReply.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpReply.cc,v 1.37 1999/04/26 21:06:12 wessels Exp $ + * $Id: HttpReply.cc,v 1.38 1999/10/04 05:04:57 wessels Exp $ * * DEBUG: section 58 HTTP Reply (Response) * AUTHOR: Alex Rousskov @@ -60,7 +60,7 @@ static int httpReplyIsolateStart(const char **parse_start, const char **blk_star /* module initialization */ void -httpReplyInitModule() +httpReplyInitModule(void) { httpHeaderMaskInit(&Denied304HeadersMask, 0); httpHeaderCalcMask(&Denied304HeadersMask, (const int *) Denied304HeadersArr, countof(Denied304HeadersArr)); @@ -68,7 +68,7 @@ httpReplyInitModule() HttpReply * -httpReplyCreate() +httpReplyCreate(void) { HttpReply *rep = memAllocate(MEM_HTTP_REPLY); debug(58, 7) ("creating rep: %p\n", rep); @@ -125,21 +125,31 @@ httpReplyAbsorb(HttpReply * rep, HttpReply * new_rep) httpReplyDoDestroy(new_rep); } -/* parses a 4K buffer that may not be 0-terminated; returns true on success */ +/* + * httpReplyParse takes character buffer of HTTP headers (buf), + * which may not be NULL-terminated, and fills in an HttpReply + * structure (rep). The parameter 'end' specifies the offset to + * the end of the reply headers. The caller may know where the + * end is, but is unable to NULL-terminate the buffer. This function + * returns true on success. + */ int -httpReplyParse(HttpReply * rep, const char *buf) +httpReplyParse(HttpReply * rep, const char *buf, ssize_t end) { /* - * this extra buffer/copy will be eliminated when headers become meta-data - * in store. Currently we have to xstrncpy the buffer becuase store.c may - * feed a non 0-terminated buffer to us. + * this extra buffer/copy will be eliminated when headers become + * meta-data in store. Currently we have to xstrncpy the buffer + * becuase somebody may feed a non NULL-terminated buffer to + * us. 
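+ * In concrete terms (see the body below): the caller's buffer is copied
+ * with xstrncpy() into a 4KB scratch buffer, and when 0 <= end < 4096 a
+ * '\0' is written at offset 'end' before httpReplyParseStep() runs.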
*/ char *headers = memAllocate(MEM_4K_BUF); int success; /* reset current state, because we are not used in incremental fashion */ httpReplyReset(rep); - /* put a 0-terminator */ + /* put a string terminator */ xstrncpy(headers, buf, 4096); + if (end >= 0 && end < 4096) + *(headers + end) = '\0'; success = httpReplyParseStep(rep, headers, 0); memFree(headers, MEM_4K_BUF); return success == 1; diff --git a/src/HttpRequest.cc b/src/HttpRequest.cc index b38a1bf13c..b869f662cf 100644 --- a/src/HttpRequest.cc +++ b/src/HttpRequest.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpRequest.cc,v 1.23 1999/01/29 23:39:11 wessels Exp $ + * $Id: HttpRequest.cc,v 1.24 1999/10/04 05:04:57 wessels Exp $ * * DEBUG: section 73 HTTP Request * AUTHOR: Duane Wessels @@ -44,6 +44,7 @@ requestCreate(method_t method, protocol_t protocol, const char *urlpath) if (urlpath) stringReset(&req->urlpath, urlpath); req->max_forwards = -1; + req->lastmod = -1; httpHeaderInit(&req->header, hoRequest); return req; } diff --git a/src/acl.cc b/src/acl.cc index 957bbb68db..a839e805ec 100644 --- a/src/acl.cc +++ b/src/acl.cc @@ -1,6 +1,6 @@ /* - * $Id: acl.cc,v 1.207 1999/08/02 06:18:28 wessels Exp $ + * $Id: acl.cc,v 1.208 1999/10/04 05:04:59 wessels Exp $ * * DEBUG: section 28 Access Control * AUTHOR: Duane Wessels @@ -175,6 +175,8 @@ aclStrToType(const char *s) return ACL_URL_REGEX; if (!strcmp(s, "port")) return ACL_URL_PORT; + if (!strcmp(s, "maxconn")) + return ACL_MAXCONN; #if USE_IDENT if (!strcmp(s, "ident")) return ACL_IDENT; @@ -229,6 +231,8 @@ aclTypeToStr(squid_acl type) return "url_regex"; if (type == ACL_URL_PORT) return "port"; + if (type == ACL_MAXCONN) + return "maxconn"; #if USE_IDENT if (type == ACL_IDENT) return "ident"; @@ -699,6 +703,7 @@ aclParseAclLine(acl ** head) aclParseRegexList(&A->data); break; case ACL_SRC_ASN: + case ACL_MAXCONN: case ACL_DST_ASN: case ACL_NETDB_SRC_RTT: aclParseIntlist(&A->data); @@ -1076,6 +1081,8 @@ aclMatchProxyAuth(wordlist * data, const char *proxy_auth, acl_proxy_auth_user * /* store validated user in hash, after filling in expiretime */ xstrncpy(checklist->request->user_ident, user, USER_IDENT_SZ); auth_user->expiretime = current_time.tv_sec + Config.authenticateTTL; + auth_user->ip_expiretime = squid_curtime + Config.authenticateIpTTL; + auth_user->ipaddr = checklist->src_addr; hash_join(proxy_auth_cache, (hash_link *) auth_user); /* Continue checking below, as normal */ } @@ -1089,12 +1096,26 @@ aclMatchProxyAuth(wordlist * data, const char *proxy_auth, acl_proxy_auth_user * return -1; } else if ((0 == strcmp(auth_user->passwd, password)) && (auth_user->expiretime > current_time.tv_sec)) { - /* user already known and valid */ - debug(28, 5) ("aclMatchProxyAuth: user '%s' previously validated\n", - user); - /* copy username to request for logging on client-side */ - xstrncpy(checklist->request->user_ident, user, USER_IDENT_SZ); - return aclMatchUser(data, user); + if (checklist->src_addr.s_addr == auth_user->ipaddr.s_addr + || auth_user->ip_expiretime <= squid_curtime) { + /* user already known and valid */ + debug(28, 5) ("aclMatchProxyAuth: user '%s' previously validated\n", + user); + /* Update IP ttl */ + auth_user->ip_expiretime = squid_curtime + Config.authenticateIpTTL; + auth_user->ipaddr = checklist->src_addr; + /* copy username to request for logging on client-side */ + xstrncpy(checklist->request->user_ident, user, USER_IDENT_SZ); + return aclMatchUser(data, user); + } else { + /* user has switched to another IP addr */ + debug(28, 1) ("aclMatchProxyAuth: user '%s' has 
changed IP address\n", user); + /* remove this user from the hash, making him unknown */ + hash_remove_link(proxy_auth_cache, (hash_link *) auth_user); + aclFreeProxyAuthUser(auth_user); + /* require the user to reauthenticate */ + return -2; + } } else { /* password mismatch/timeout */ debug(28, 4) ("aclMatchProxyAuth: user '%s' password mismatch/timeout\n", @@ -1338,6 +1359,10 @@ aclMatchAcl(acl * ae, aclCheck_t * checklist) safe_free(esc_buf); return k; /* NOTREACHED */ + case ACL_MAXCONN: + k = clientdbEstablished(checklist->src_addr, 0); + return ((k > ((intlist *) ae->data)->i) ? 0 : 1); + /* NOTREACHED */ case ACL_URL_PORT: return aclMatchIntegerRange(ae->data, r->port); /* NOTREACHED */ @@ -1813,6 +1838,7 @@ aclDestroyAcls(acl ** head) case ACL_SRC_ASN: case ACL_DST_ASN: case ACL_NETDB_SRC_RTT: + case ACL_MAXCONN: intlistDestroy((intlist **) & a->data); break; case ACL_URL_PORT: @@ -1895,10 +1921,10 @@ aclDestroyIntRange(intrange * list) /* compare two domains */ static int -aclDomainCompare(const void *data, splayNode * n) +aclDomainCompare(const void *a, const void *b) { - const char *d1 = data; - const char *d2 = n->data; + const char *d1 = a; + const char *d2 = b; int l1; int l2; while ('.' == *d1) @@ -1933,31 +1959,11 @@ aclDomainCompare(const void *data, splayNode * n) /* compare a host and a domain */ static int -aclHostDomainCompare(const void *data, splayNode * n) +aclHostDomainCompare(const void *a, const void *b) { - const char *h = data; - char *d = n->data; - int l1; - int l2; - if (matchDomainName(d, h)) - return 0; - l1 = strlen(h); - l2 = strlen(d); - /* h != d */ - while (xtolower(h[l1]) == xtolower(d[l2])) { - if (l1 == 0) - break; - if (l2 == 0) - break; - l1--; - l2--; - } - /* a '.' is a special case */ - if ((h[l1] == '.') || (l1 == 0)) - return -1; /* domain(h) < d */ - if ((d[l2] == '.') || (l2 == 0)) - return 1; /* domain(h) > d */ - return (xtolower(h[l1]) - xtolower(d[l2])); + const char *h = a; + const char *d = b; + return matchDomainName(d, h); } /* compare two network specs @@ -1974,12 +1980,12 @@ aclHostDomainCompare(const void *data, splayNode * n) /* compare an address and a network spec */ static int -aclIpNetworkCompare(const void *a, splayNode * n) +aclIpNetworkCompare(const void *a, const void *b) { - struct in_addr A = *(struct in_addr *) a; - acl_ip_data *q = n->data; - struct in_addr B = q->addr1; - struct in_addr C = q->addr2; + struct in_addr A = *(const struct in_addr *) a; + const acl_ip_data *q = b; + const struct in_addr B = q->addr1; + const struct in_addr C = q->addr2; int rc = 0; A.s_addr &= q->mask.s_addr; /* apply netmask */ if (C.s_addr == 0) { /* single address check */ @@ -2159,6 +2165,7 @@ aclDumpGeneric(const acl * a) return aclDumpRegexList(a->data); break; case ACL_SRC_ASN: + case ACL_MAXCONN: case ACL_DST_ASN: return aclDumpIntlistList(a->data); break; @@ -2316,10 +2323,10 @@ aclMatchArp(void *dataptr, struct in_addr c) } static int -aclArpCompare(const void *data, splayNode * n) +aclArpCompare(const void *a, const void *b) { - const unsigned short *d1 = data; - const unsigned short *d2 = n->data; + const unsigned short *d1 = a; + const unsigned short *d2 = b; if (d1[0] != d2[0]) return (d1[0] > d2[0]) ? 
1 : -1; if (d1[1] != d2[1]) diff --git a/src/cf.data.pre b/src/cf.data.pre index 1f404584c9..ee96aae267 100644 --- a/src/cf.data.pre +++ b/src/cf.data.pre @@ -1,6 +1,6 @@ # -# $Id: cf.data.pre,v 1.161 1999/08/02 06:18:30 wessels Exp $ +# $Id: cf.data.pre,v 1.162 1999/10/04 05:05:01 wessels Exp $ # # # SQUID Internet Object Cache http://squid.nlanr.net/Squid/ @@ -589,16 +589,19 @@ COMMENT_END NAME: cache_dir TYPE: cachedir DEFAULT: none -DEFAULT_IF_NONE: @DEFAULT_SWAP_DIR@ 100 16 256 +DEFAULT_IF_NONE: ufs @DEFAULT_SWAP_DIR@ 100 16 256 LOC: Config.cacheSwap DOC_START Usage: - cache_dir Directory-Name Mbytes Level-1 Level2 + cache_dir Type Directory-Name Mbytes Level-1 Level2 You can specify multiple cache_dir lines to spread the cache among different disk partitions. + Type specifies the kind of storage system to use. If you + don't know what to put here, then use "ufs". + 'Directory' is a top-level directory where cache swap files will be stored. If you want to use an entire disk for caching, then this can be the mount-point directory. @@ -619,7 +622,7 @@ DOC_START will be created under each first-level directory. The default is 256. -cache_dir @DEFAULT_SWAP_DIR@ 100 16 256 +cache_dir ufs @DEFAULT_SWAP_DIR@ 100 16 256 DOC_END @@ -968,6 +971,16 @@ DOC_START redirect_rewrites_host_header on DOC_END +NAME: redirector_access +TYPE: acl_access +DEFAULT: none +LOC: Config.accessList.redirector +DOC_START + If defined, this access list specifies which requests are + sent to the redirector processes. By default all requests + are sent. +DOC_END + NAME: authenticate_program TYPE: wordlist @@ -1020,6 +1033,28 @@ DOC_START authenticate_ttl 3600 DOC_END +NAME: authenticate_ip_ttl +TYPE: int +LOC: Config.authenticateIpTTL +DEFAULT: 0 +DOC_START + With this option you control how long a proxy authentication + will be bound to a specific IP address. If a request using + the same user name is received during this time then access + will be denied and both users are required to reauthenticate + them selves. The idea behind this is to make it annoying + for people to share their password to their friends, but + yet allow a dialup user to reconnect on a different dialup + port. + + The default is 0 to disable the check. Recommended value + if you have dialup users are no more than 60 (seconds). If + all your users are stationary then higher values may be + used. + +authenticate_ip_ttl 0 +DOC_END + COMMENT_START OPTIONS FOR TUNING THE CACHE ----------------------------------------------------------------------------- @@ -1365,6 +1400,7 @@ DOC_START may also set different timeout values for individual neighbors with the 'connect-timeout' option on a 'cache_peer' line. peer_connect_timeout 30 seconds +DOC_END NAME: siteselect_timeout COMMENT: time-units @@ -1468,6 +1504,10 @@ DOC_START to denial-of-service by having many ident requests going at once. + Only src type ACL checks are fully supported. A src_domain + ACL might work at times, but it will not always provide + the correct result. + This option may be disabled by using --disable-ident with the configure script. ident_timeout 10 seconds @@ -1578,6 +1618,10 @@ DOC_START # # acl snmppublic snmp_community public + acl aclname maxconn number + # This will be matched when the client's IP address has + # more than HTTP connections established. 
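The maxconn ACL documented above is backed by the clientdbEstablished() counter added to src/client_db.cc later in this patch: httpAccept() calls it with a delta of 1, connStateFree() with -1, and the ACL match reads the current value with a delta of 0. The standalone C sketch below only models that accounting idea; the fixed-size table, the helper names and the limit used here are illustrative and are not part of Squid.

    #include <stdio.h>
    #include <string.h>

    #define MAX_CLIENTS 8                /* illustrative table size */

    struct client_count {
        char addr[16];                   /* dotted-quad client address */
        int n_established;               /* currently open connections */
    };

    static struct client_count table[MAX_CLIENTS];

    /* Same idea as clientdbEstablished(): +1 when a connection is accepted,
     * -1 when it is closed, 0 just reads the current count for 'addr'. */
    static int established(const char *addr, int delta)
    {
        int i;
        for (i = 0; i < MAX_CLIENTS; i++) {
            if (table[i].addr[0] == '\0') {
                strncpy(table[i].addr, addr, sizeof(table[i].addr) - 1);
                table[i].n_established = 0;
            }
            if (strcmp(table[i].addr, addr) == 0) {
                table[i].n_established += delta;
                return table[i].n_established;
            }
        }
        return 0;                        /* table full; a real cache would grow it */
    }

    /* Mirrors the documented maxconn intent: match once the client already
     * has more than 'limit' established connections. */
    static int maxconn_exceeded(const char *addr, int limit)
    {
        return established(addr, 0) > limit;
    }

    int main(void)
    {
        const char *client = "10.0.0.1"; /* hypothetical client address */
        int i;
        for (i = 0; i < 6; i++)
            established(client, 1);      /* six connections accepted */
        printf("over limit 5? %s\n", maxconn_exceeded(client, 5) ? "yes" : "no");
        established(client, -1);         /* one connection closed */
        printf("over limit 5? %s\n", maxconn_exceeded(client, 5) ? "yes" : "no");
        return 0;
    }

In Squid itself the counter lives in the ClientInfo hash keyed by the client's IP address, so the same client_db entry also feeds the new "Currently established connections" line in the cachemgr client_db report added elsewhere in this patch.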
+ Examples: acl myexample dst_as 1241 @@ -1590,6 +1634,10 @@ acl manager proto cache_object acl localhost src 127.0.0.1/255.255.255.255 acl SSL_ports port 443 563 acl Safe_ports port 80 21 443 563 70 210 1025-65535 +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http acl CONNECT method CONNECT NOCOMMENT_END DOC_END @@ -2222,7 +2270,7 @@ LOC: Config.Store.objectsPerBucket DOC_START Target number of objects per bucket in the store hash table. Lowering this value increases the total number of buckets and - also the storage maintenance rate. The default is 20. + also the storage maintenance rate. The default is 50. store_objects_per_bucket 50 DOC_END @@ -2537,34 +2585,13 @@ IFDEF: SQUID_SNMP DOC_START Squid can now serve statistics and status information via SNMP. By default it listens to port 3401 on the machine. If you don't - wish to use SNMP, set this to '-1'. + wish to use SNMP, set this to "0". NOTE: SNMP support requires use the --enable-snmp configure command line option. snmp_port 3401 DOC_END -NAME: forward_snmpd_port -TYPE: ushort -LOC: Config.Snmp.localPort -DEFAULT: 0 -IFDEF: SQUID_SNMP -DOC_START - This configures whether we should be forwarding SNMP requests - to another snmpd. The reason for putting this piece of - functionality into Squid was to enable access to the system's - installed snmpd with minimal changes. This option is turned - off by default, check with your /etc/services for your system's - snmp port (usually 161). We do not use getservbyname() to - allow you to set Squid into port 161 and your system's snmpd to - another port by changing /etc/services. - - WARNING: Because of Squid acting as a proxy snmpd for system - you have to do security checks on THIS snmpd for all objects. - Check your snmp_config_file. -forward_snmpd_port 0 -DOC_END - NAME: snmp_access TYPE: acl_access LOC: Config.accessList.snmp @@ -2994,4 +3021,17 @@ DOC_START be allowed to request. DOC_END +NAME: ignore_unknown_nameservers +TYPE: onoff +LOC: Config.onoff.ignore_unknown_nameservers +DEFAULT: on +DOC_START + By default Squid checks that DNS responses are received + from the same IP addresses that they are sent to. If they + don't match, Squid ignores the response and writes a warning + message to cache.log. You can allow responses from unknown + nameservers by setting this option to 'off'. +ignore_unknown_nameservers on +DOC_END + EOF diff --git a/src/client.cc b/src/client.cc index b43b3b803a..5ee167830d 100644 --- a/src/client.cc +++ b/src/client.cc @@ -1,6 +1,6 @@ /* - * $Id: client.cc,v 1.85 1999/05/04 19:22:21 wessels Exp $ + * $Id: client.cc,v 1.86 1999/10/04 05:05:03 wessels Exp $ * * DEBUG: section 0 WWW Client * AUTHOR: Harvest Derived @@ -411,7 +411,7 @@ pipe_handler(int sig) } static void -set_our_signal() +set_our_signal(void) { #if HAVE_SIGACTION struct sigaction sa; diff --git a/src/client_db.cc b/src/client_db.cc index 1793d366a8..358a7a57d2 100644 --- a/src/client_db.cc +++ b/src/client_db.cc @@ -1,6 +1,6 @@ /* - * $Id: client_db.cc,v 1.46 1999/06/17 22:20:36 wessels Exp $ + * $Id: client_db.cc,v 1.47 1999/10/04 05:05:03 wessels Exp $ * * DEBUG: section 0 Client Database * AUTHOR: Duane Wessels @@ -91,6 +91,30 @@ clientdbUpdate(struct in_addr addr, log_type ltype, protocol_t p, size_t size) } } +/* + * clientdbEstablished() + * This function tracks the number of currently established connections + * for a client IP address. 
When a connection is accepted, call this + * with delta = 1. When the connection is closed, call with delta = + * -1. To get the current value, simply call with delta = 0. + */ +int +clientdbEstablished(struct in_addr addr, int delta) +{ + char *key; + ClientInfo *c; + if (!Config.onoff.client_db) + return 0; + key = inet_ntoa(addr); + c = (ClientInfo *) hash_lookup(client_table, key); + if (c == NULL) + c = clientdbAdd(addr); + if (c == NULL) + debug_trap("clientdbUpdate: Failed to add entry"); + c->n_established += delta; + return c->n_established; +} + #define CUTOFF_SECONDS 3600 int clientdbCutoffDenied(struct in_addr addr) @@ -147,6 +171,8 @@ clientdbDump(StoreEntry * sentry) while ((c = (ClientInfo *) hash_next(client_table))) { storeAppendPrintf(sentry, "Address: %s\n", c->key); storeAppendPrintf(sentry, "Name: %s\n", fqdnFromAddr(c->addr)); + storeAppendPrintf(sentry, "Currently established connections: %d\n", + c->n_established); storeAppendPrintf(sentry, " ICP Requests %d\n", c->Icp.n_requests); for (l = LOG_TAG_NONE; l < LOG_TYPE_MAX; l++) { diff --git a/src/client_side.cc b/src/client_side.cc index 512cb703e1..11eb3c1309 100644 --- a/src/client_side.cc +++ b/src/client_side.cc @@ -1,6 +1,6 @@ /* - * $Id: client_side.cc,v 1.461 1999/08/02 06:18:32 wessels Exp $ + * $Id: client_side.cc,v 1.462 1999/10/04 05:05:04 wessels Exp $ * * DEBUG: section 33 Client-side Routines * AUTHOR: Duane Wessels @@ -41,9 +41,21 @@ #endif #include #include +#if HAVE_IP_COMPAT_H #include +#elif HAVE_NETINET_IP_COMPAT_H +#include +#endif +#if HAVE_IP_FIL_H #include +#elif HAVE_NETINET_IP_FIL_H +#include +#endif +#if HAVE_IP_NAT_H #include +#elif HAVE_NETINET_IP_NAT_H +#include +#endif #endif @@ -111,7 +123,7 @@ checkAccelOnly(clientHttpRequest * http) } #if USE_IDENT -void +static void clientIdentDone(const char *ident, void *data) { ConnStateData *conn = data; @@ -192,7 +204,7 @@ clientAccessCheckDone(int answer, void *data) ErrorState *err = NULL; debug(33, 2) ("The request %s %s is %s, because it matched '%s'\n", RequestMethodStr[http->request->method], http->uri, - answer ? "ALLOWED" : "DENIED", + answer == ACCESS_ALLOWED ? "ALLOWED" : "DENIED", AclMatchedName ? 
AclMatchedName : "NO ACL's"); http->acl_checklist = NULL; if (answer == ACCESS_ALLOWED) { @@ -317,7 +329,7 @@ clientProcessExpired(void *data) /* delay_id is already set on original store client */ delaySetStoreClient(entry, http, delayClient(http->request)); #endif - entry->lastmod = http->old_entry->lastmod; + http->request->lastmod = http->old_entry->lastmod; debug(33, 5) ("clientProcessExpired: lastmod %d\n", (int) entry->lastmod); entry->refcount++; /* EXPIRED CASE */ #if HEAP_REPLACEMENT @@ -735,6 +747,7 @@ connStateFree(int fd, void *data) clientHttpRequest *http; debug(33, 3) ("connStateFree: FD %d\n", fd); assert(connState != NULL); + clientdbEstablished(connState->peer.sin_addr, -1); /* decrement */ while ((http = connState->chr) != NULL) { assert(http->conn == connState); assert(connState->chr != connState->chr->next); @@ -772,6 +785,7 @@ clientInterpretRequestHeaders(clientHttpRequest * http) no_cache++; stringClean(&s); } + request->cache_control = httpHeaderGetCc(req_hdr); if (request->cache_control) if (EBIT_TEST(request->cache_control->mask, CC_NO_CACHE)) no_cache++; @@ -823,7 +837,6 @@ clientInterpretRequestHeaders(clientHttpRequest * http) stringClean(&s); } #endif - request->cache_control = httpHeaderGetCc(req_hdr); if (request->method == METHOD_TRACE) { request->max_forwards = httpHeaderGetInt(req_hdr, HDR_MAX_FORWARDS); } @@ -1189,7 +1202,7 @@ clientBuildReplyHeader(clientHttpRequest * http, HttpReply * rep) http->lookup_type ? http->lookup_type : "NONE", getMyHostname(), Config.Port.http->i); #endif - if (httpReplyBodySize(request->method, http->entry->mem_obj->reply) < 0) { + if (httpReplyBodySize(request->method, rep) < 0) { debug(33, 3) ("clientBuildReplyHeader: can't keep-alive, unknown body size\n"); request->flags.proxy_keepalive = 0; } @@ -1213,21 +1226,25 @@ static HttpReply * clientBuildReply(clientHttpRequest * http, const char *buf, size_t size) { HttpReply *rep = httpReplyCreate(); - if (httpReplyParse(rep, buf)) { + size_t k = headersEnd(buf, size); + if (k && httpReplyParse(rep, buf, k)) { /* enforce 1.0 reply version */ rep->sline.version = 1.0; /* do header conversions */ clientBuildReplyHeader(http, rep); /* if we do ranges, change status to "Partial Content" */ if (http->request->range) - httpStatusLineSet(&rep->sline, rep->sline.version, HTTP_PARTIAL_CONTENT, NULL); + httpStatusLineSet(&rep->sline, rep->sline.version, + HTTP_PARTIAL_CONTENT, NULL); } else { /* parsing failure, get rid of the invalid reply */ httpReplyDestroy(rep); rep = NULL; /* if we were going to do ranges, backoff */ - if (http->request->range) - clientBuildRangeHeader(http, rep); /* will fail and destroy request->range */ + if (http->request->range) { + /* this will fail and destroy request->range */ + clientBuildRangeHeader(http, rep); + } } return rep; } @@ -1370,7 +1387,7 @@ clientCacheHit(void *data, char *buf, ssize_t size) storeUnlockObject(e); e = clientCreateStoreEntry(http, http->request->method, null_request_flags); http->entry = e; - httpReplyParse(e->mem_obj->reply, mb.buf); + httpReplyParse(e->mem_obj->reply, mb.buf, mb.size); storeAppend(e, mb.buf, mb.size); memBufClean(&mb); storeComplete(e); @@ -1423,20 +1440,30 @@ clientPackRangeHdr(const HttpReply * rep, const HttpHdrRangeSpec * spec, String memBufPrintf(mb, crlf); } -/* extracts a "range" from *buf and appends them to mb, updating all offsets and such */ +/* + * extracts a "range" from *buf and appends them to mb, updating + * all offsets and such. 
+ */ static void -clientPackRange(clientHttpRequest * http, HttpHdrRangeIter * i, const char **buf, ssize_t * size, MemBuf * mb) +clientPackRange(clientHttpRequest * http, + HttpHdrRangeIter * i, + const char **buf, + ssize_t * size, + MemBuf * mb) { - const size_t copy_sz = i->debt_size <= *size ? i->debt_size : *size; + const ssize_t copy_sz = i->debt_size <= *size ? i->debt_size : *size; off_t body_off = http->out.offset - i->prefix_size; assert(*size > 0); assert(i->spec); - - /* intersection of "have" and "need" ranges must not be empty */ + /* + * intersection of "have" and "need" ranges must not be empty + */ assert(body_off < i->spec->offset + i->spec->length); assert(body_off + *size > i->spec->offset); - - /* put boundary and headers at the beginning of a range in a multi-range */ + /* + * put boundary and headers at the beginning of a range in a + * multi-range + */ if (http->request->range->specs.count > 1 && i->debt_size == i->spec->length) { assert(http->entry->mem_obj); clientPackRangeHdr( @@ -1446,18 +1473,22 @@ clientPackRange(clientHttpRequest * http, HttpHdrRangeIter * i, const char **buf mb ); } - /* append content */ + /* + * append content + */ debug(33, 3) ("clientPackRange: appending %d bytes\n", copy_sz); memBufAppend(mb, *buf, copy_sz); - - /* update offsets */ + /* + * update offsets + */ *size -= copy_sz; i->debt_size -= copy_sz; body_off += copy_sz; *buf += copy_sz; http->out.offset = body_off + i->prefix_size; /* sync */ - - /* paranoid check */ + /* + * paranoid check + */ assert(*size >= 0 && i->debt_size >= 0); } @@ -1610,6 +1641,7 @@ clientSendMoreData(void *data, char *buf, ssize_t size) http->entry = clientCreateStoreEntry(http, http->request->method, null_request_flags); errorAppendEntry(http->entry, err); + httpReplyDestroy(rep); return; } else if (rep) { body_size = size - rep->hdr_sz; @@ -2241,10 +2273,10 @@ parseHttpRequest(ConnStateData * conn, method_t * method_p, int *status, strcpy(http->uri, url); http->flags.accel = 0; } - if (!stringHasWhitespace(http->uri)) + if (!stringHasCntl((unsigned char *) http->uri)) http->log_uri = xstrndup(http->uri, MAX_URL); else - http->log_uri = xstrndup(rfc1738_escape(http->uri), MAX_URL); + http->log_uri = xstrndup(rfc1738_escape_unescaped(http->uri), MAX_URL); debug(33, 5) ("parseHttpRequest: Complete request received\n"); if (free_request) safe_free(url); @@ -2614,6 +2646,7 @@ httpAccept(int sock, void *data) #endif commSetSelect(fd, COMM_SELECT_READ, clientReadRequest, connState, 0); commSetDefer(fd, clientReadDefer, connState); + clientdbEstablished(peer.sin_addr, 1); (*N)++; } } diff --git a/src/comm.cc b/src/comm.cc index 4cb156d19a..f9fb0bf768 100644 --- a/src/comm.cc +++ b/src/comm.cc @@ -1,6 +1,6 @@ /* - * $Id: comm.cc,v 1.302 1999/05/03 21:54:59 wessels Exp $ + * $Id: comm.cc,v 1.303 1999/10/04 05:05:06 wessels Exp $ * * DEBUG: section 5 Socket Functions * AUTHOR: Harvest Derived @@ -160,11 +160,11 @@ comm_open(int sock_type, case ENFILE: case EMFILE: debug(50, 1) ("comm_open: socket failure: %s\n", xstrerror()); + fdAdjustReserved(); break; default: debug(50, 0) ("comm_open: socket failure: %s\n", xstrerror()); } - fdAdjustReserved(); return -1; } /* update fdstat */ @@ -302,12 +302,14 @@ commResetFD(ConnectStateData * cs) Counter.syscalls.sock.sockets++; if (fd2 < 0) { debug(5, 0) ("commResetFD: socket: %s\n", xstrerror()); - fdAdjustReserved(); + if (ENFILE == errno || EMFILE == errno) + fdAdjustReserved(); return 0; } if (dup2(fd2, cs->fd) < 0) { debug(5, 0) ("commResetFD: dup2: %s\n", 
xstrerror()); - fdAdjustReserved(); + if (ENFILE == errno || EMFILE == errno) + fdAdjustReserved(); return 0; } close(fd2); diff --git a/src/debug.cc b/src/debug.cc index 596ca54e6d..5ce1ab87f8 100644 --- a/src/debug.cc +++ b/src/debug.cc @@ -1,6 +1,6 @@ /* - * $Id: debug.cc,v 1.76 1999/08/02 06:18:33 wessels Exp $ + * $Id: debug.cc,v 1.77 1999/10/04 05:05:07 wessels Exp $ * * DEBUG: section 0 Debug Routines * AUTHOR: Harvest Derived @@ -370,7 +370,7 @@ ctx_exit(Ctx ctx) * info for deducing the current execution stack */ static void -ctx_print() +ctx_print(void) { /* lock so _db_print will not call us recursively */ Ctx_Lock++; diff --git a/src/delay_pools.cc b/src/delay_pools.cc index 6a6ded583a..72517d0164 100644 --- a/src/delay_pools.cc +++ b/src/delay_pools.cc @@ -1,6 +1,6 @@ /* - * $Id: delay_pools.cc,v 1.10 1999/05/19 19:57:41 wessels Exp $ + * $Id: delay_pools.cc,v 1.11 1999/10/04 05:05:08 wessels Exp $ * * DEBUG: section 77 Delay Pools * AUTHOR: David Luyer @@ -149,7 +149,7 @@ delayIdZero(void *hlink) } void -delayFreeDelayData() +delayFreeDelayData(void) { safe_free(delay_data); if (!delay_id_ptr_hash) diff --git a/src/dns_internal.cc b/src/dns_internal.cc index ba2b93d160..30fa2fbfd5 100644 --- a/src/dns_internal.cc +++ b/src/dns_internal.cc @@ -1,6 +1,6 @@ /* - * $Id: dns_internal.cc,v 1.15 1999/08/02 06:18:34 wessels Exp $ + * $Id: dns_internal.cc,v 1.16 1999/10/04 05:05:09 wessels Exp $ * * DEBUG: section 78 DNS lookups; interacts with lib/rfc1035.c * AUTHOR: Duane Wessels @@ -130,6 +130,8 @@ idnsParseResolvConf(void) idnsFreeNameservers(); while (fgets(buf, 512, fp)) { t = strtok(buf, w_space); + if (t == NULL) + continue;; if (strcasecmp(t, "nameserver")) continue; t = strtok(NULL, w_space); @@ -306,12 +308,17 @@ idnsRead(int fd, void *data) len, inet_ntoa(from.sin_addr)); ns = idnsFromKnownNameserver(&from); - if (ns < 0) { - debug(78, 1) ("idnsRead: Reply from unknown nameserver [%s]\n", - inet_ntoa(from.sin_addr)); + if (ns >= 0) { + nameservers[ns].nreplies++; + } else if (Config.onoff.ignore_unknown_nameservers) { + static time_t last_warning = 0; + if (squid_curtime - last_warning > 60) { + debug(78, 1) ("WARNING: Reply from unknown nameserver [%s]\n", + inet_ntoa(from.sin_addr)); + last_warning = squid_curtime; + } continue; } - nameservers[ns].nreplies++; idnsGrokReply(rbuf, len); } if (lru_list.head) diff --git a/src/enums.h b/src/enums.h index 9f7a5e8181..01ee773446 100644 --- a/src/enums.h +++ b/src/enums.h @@ -1,6 +1,6 @@ /* - * $Id: enums.h,v 1.160 1999/08/02 06:18:35 wessels Exp $ + * $Id: enums.h,v 1.161 1999/10/04 05:05:09 wessels Exp $ * * * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ @@ -114,6 +114,7 @@ typedef enum { ACL_SRC_ARP, ACL_SNMP_COMMUNITY, ACL_NETDB_SRC_RTT, + ACL_MAXCONN, ACL_ENUM_MAX } squid_acl; @@ -290,7 +291,8 @@ typedef enum { SOURCE_FASTEST, ROUNDROBIN_PARENT, #if USE_CACHE_DIGESTS - CACHE_DIGEST_HIT, + CD_PARENT_HIT, + CD_SIBLING_HIT, #endif #if USE_CARP CARP, @@ -457,7 +459,8 @@ enum { ENTRY_NEGCACHED, ENTRY_VALIDATED, ENTRY_BAD_LENGTH, - ENTRY_ABORTED + ENTRY_ABORTED, + ENTRY_DONT_LOG /* hack for gross 'Pump' entries */ }; typedef enum { @@ -526,6 +529,10 @@ typedef enum { MEM_HELPER_REQUEST, MEM_HELPER_SERVER, MEM_HIERARCHYLOGENTRY, +#if USE_HTCP + MEM_HTCP_SPECIFIER, + MEM_HTCP_DETAIL, +#endif MEM_HTTP_HDR_CC, MEM_HTTP_HDR_CONTENT_RANGE, MEM_HTTP_HDR_ENTRY, diff --git a/src/errorpage.cc b/src/errorpage.cc index b0e82e6221..7b4c22385a 100644 --- a/src/errorpage.cc +++ b/src/errorpage.cc @@ -1,6 +1,6 @@ /* - * $Id: 
errorpage.cc,v 1.151 1999/06/19 16:34:36 wessels Exp $ + * $Id: errorpage.cc,v 1.152 1999/10/04 05:05:10 wessels Exp $ * * DEBUG: section 4 Error Generation * AUTHOR: Duane Wessels @@ -67,7 +67,7 @@ static const struct { ERR_SQUID_SIGNATURE, "\n
    \n" "
    \n" - "Generated %T by %h (%s)\n" + "Generated %T by %h (%s)\n" "\n" } }; @@ -378,6 +378,9 @@ errorStateFree(ErrorState * err) safe_free(err->host); safe_free(err->dnsserver_msg); safe_free(err->request_hdrs); + wordlistDestroy(&err->ftp.server_msg); + safe_free(err->ftp.request); + safe_free(err->ftp.reply); if (err->flags.flag_cbdata) cbdataFree(err); else @@ -451,7 +454,7 @@ errorConvert(char token, ErrorState * err) break; case 'g': /* FTP SERVER MESSAGE */ - wordlistCat(err->ftp_server_msg, &mb); + wordlistCat(err->ftp.server_msg, &mb); break; case 'h': memBufPrintf(&mb, "%s", getMyHostname()); diff --git a/src/forward.cc b/src/forward.cc index b5dadfe1bb..528edb1852 100644 --- a/src/forward.cc +++ b/src/forward.cc @@ -1,6 +1,6 @@ /* - * $Id: forward.cc,v 1.65 1999/09/29 00:22:13 wessels Exp $ + * $Id: forward.cc,v 1.66 1999/10/04 05:05:11 wessels Exp $ * * DEBUG: section 17 Request Forwarding * AUTHOR: Duane Wessels @@ -525,7 +525,7 @@ fwdFail(FwdState * fwdState, ErrorState * errorState) /* * Called when someone else calls StoreAbort() on this entry */ -void +static void fwdAbort(void *data) { FwdState *fwdState = data; diff --git a/src/ftp.cc b/src/ftp.cc index f44e558bf5..4b60e6aea9 100644 --- a/src/ftp.cc +++ b/src/ftp.cc @@ -1,6 +1,6 @@ /* - * $Id: ftp.cc,v 1.286 1999/06/24 21:12:22 wessels Exp $ + * $Id: ftp.cc,v 1.287 1999/10/04 05:05:12 wessels Exp $ * * DEBUG: section 9 File Transfer Protocol (FTP) * AUTHOR: Harvest Derived @@ -85,9 +85,11 @@ typedef struct _Ftpdata { request_t *request; char user[MAX_URL]; char password[MAX_URL]; + int password_url; char *reply_hdr; int reply_hdr_state; char *title_url; + char *base_href; int conn_att; int login_att; ftp_state_t state; @@ -163,6 +165,8 @@ static void ftpUnhack(FtpStateData * ftpState); static void ftpScheduleReadControlReply(FtpStateData *, int); static void ftpHandleControlReply(FtpStateData *); static char *ftpHtmlifyListEntry(char *line, FtpStateData * ftpState); +static void ftpFailed(FtpStateData *, err_type /* ERR_NONE if unknown */ ); +static void ftpFailedErrorMessage(FtpStateData *, err_type /* ERR_NONE if unknown */ ); /* State machine functions * send == state transition @@ -234,24 +238,24 @@ Quit - FTPSM *FTP_SM_FUNCS[] = { - ftpReadWelcome, - ftpReadUser, - ftpReadPass, - ftpReadType, - ftpReadMdtm, - ftpReadSize, - ftpReadPort, - ftpReadPasv, - ftpReadCwd, + ftpReadWelcome, /* BEGIN */ + ftpReadUser, /* SENT_USER */ + ftpReadPass, /* SENT_PASS */ + ftpReadType, /* SENT_TYPE */ + ftpReadMdtm, /* SENT_MDTM */ + ftpReadSize, /* SENT_SIZE */ + ftpReadPort, /* SENT_PORT */ + ftpReadPasv, /* SENT_PASV */ + ftpReadCwd, /* SENT_CWD */ ftpReadList, /* SENT_LIST */ ftpReadList, /* SENT_NLST */ - ftpReadRest, - ftpReadRetr, - ftpReadStor, - ftpReadQuit, - ftpReadTransferDone, - ftpSendReply, - ftpReadMkdir + ftpReadRest, /* SENT_REST */ + ftpReadRetr, /* SENT_RETR */ + ftpReadStor, /* SENT_STOR */ + ftpReadQuit, /* SENT_QUIT */ + ftpReadTransferDone, /* READING_DATA (RETR,LIST,NLST) */ + ftpSendReply, /* WRITING_DATA (STOR) */ + ftpReadMkdir /* SENT_MKDIR */ }; static void @@ -294,6 +298,7 @@ ftpStateFree(int fdnotused, void *data) safe_free(ftpState->old_reply); safe_free(ftpState->old_filepath); safe_free(ftpState->title_url); + safe_free(ftpState->base_href); safe_free(ftpState->filepath); safe_free(ftpState->data.host); if (ftpState->data.fd > -1) { @@ -313,6 +318,7 @@ ftpLoginParser(const char *login, FtpStateData * ftpState, int escaped) xstrncpy(ftpState->password, s + 1, MAX_URL); if (escaped) 
rfc1738_unescape(ftpState->password); + ftpState->password_url = 1; } else { xstrncpy(ftpState->password, null_string, MAX_URL); } @@ -330,18 +336,8 @@ ftpTimeout(int fd, void *data) FtpStateData *ftpState = data; StoreEntry *entry = ftpState->entry; debug(9, 4) ("ftpTimeout: FD %d: '%s'\n", fd, storeUrl(entry)); - if (entry->store_status == STORE_PENDING) { - if (entry->mem_obj->inmem_hi == 0) { - fwdFail(ftpState->fwd, - errorCon(ERR_READ_TIMEOUT, HTTP_GATEWAY_TIMEOUT)); - } - } - if (ftpState->data.fd > -1) { - comm_close(ftpState->data.fd); - ftpState->data.fd = -1; - } - comm_close(ftpState->ctrl.fd); - /* don't modify ftpState here, it has been freed */ + ftpFailed(ftpState, ERR_READ_TIMEOUT); + /* ftpFailed closes ctrl.fd and frees ftpState */ } static void @@ -361,7 +357,7 @@ ftpListingStart(FtpStateData * ftpState) storeAppendPrintf(e, "\n"); if (ftpState->flags.use_base) storeAppendPrintf(e, "\n", - ftpState->title_url); + ftpState->base_href); storeAppendPrintf(e, "\n"); if (ftpState->cwd_message) { storeAppendPrintf(e, "
    \n");
    @@ -697,7 +693,7 @@ ftpHtmlifyListEntry(char *line, FtpStateData * ftpState)
     	}
         }
         /* {icon} {text} . . . {date}{size}{chdir}{view}{download}{link}\n  */
    -    xstrncpy(href, rfc1738_escape(parts->name), 2048);
    +    xstrncpy(href, rfc1738_escape_part(parts->name), 2048);
         xstrncpy(text, parts->showname, 2048);
         switch (parts->type) {
         case 'd':
    @@ -831,11 +827,13 @@ ftpReadComplete(FtpStateData * ftpState)
     {
         debug(9, 3) ("ftpReadComplete\n");
         /* Connection closed; retrieval done. */
    -    if (ftpState->flags.html_header_sent)
    -	ftpListingFinish(ftpState);
    -    if (!ftpState->flags.put) {
    -	storeTimestampsSet(ftpState->entry);
    -	fwdComplete(ftpState->fwd);
    +    if (ftpState->data.fd > -1) {
    +	/*
    +	 * close data socket so it does not occupy resources while
    +	 * we wait
    +	 */
    +	comm_close(ftpState->data.fd);
    +	ftpState->data.fd = -1;
         }
         /* expect the "transfer complete" message on the control socket */
         ftpScheduleReadControlReply(ftpState, 1);
    @@ -895,8 +893,9 @@ ftpDataRead(int fd, void *data)
     		data,
     		Config.Timeout.read);
     	} else {
    -	    assert(mem->inmem_hi > 0);
    -	    ftpDataTransferDone(ftpState);
    +	    ftpFailed(ftpState, ERR_READ_ERROR);
    +	    /* ftpFailed closes ctrl.fd and frees ftpState */
    +	    return;
     	}
         } else if (len == 0) {
     	ftpReadComplete(ftpState);
    @@ -930,12 +929,10 @@ ftpCheckAuth(FtpStateData * ftpState, const HttpHeader * req_hdr)
         char *orig_user;
         const char *auth;
         ftpLoginParser(ftpState->request->login, ftpState, FTP_LOGIN_ESCAPED);
    -    if (ftpState->user[0] && ftpState->password[0])
    -	return 1;		/* name and passwd both in URL */
    -    if (!ftpState->user[0] && !ftpState->password[0])
    -	return 1;		/* no name or passwd */
    -    if (ftpState->password[0])
    -	return 1;		/* passwd with no name? */
    +    if (!ftpState->user[0])
    +	return 1;		/* no name */
    +    if (ftpState->password_url || ftpState->password[0])
    +	return 1;		/* passwd provided in URL */
         /* URL has name, but no passwd */
         if (!(auth = httpHeaderGetAuth(req_hdr, HDR_AUTHORIZATION, "Basic")))
     	return 0;		/* need auth header */
    @@ -967,7 +964,6 @@ ftpCheckUrlpath(FtpStateData * ftpState)
         ftpState->flags.use_base = 1;
         /* check for null path */
         if (!l) {
    -	stringReset(&request->urlpath, "/");
     	ftpState->flags.isdir = 1;
     	ftpState->flags.root_dir = 1;
         } else if (!strCmp(request->urlpath, "/%2f/")) {
    @@ -1005,6 +1001,21 @@ ftpBuildTitleUrl(FtpStateData * ftpState)
         if (request->port != urlDefaultPort(PROTO_FTP))
     	snprintf(&t[strlen(t)], len - strlen(t), ":%d", request->port);
         strcat(t, strBuf(request->urlpath));
    +    t = ftpState->base_href = xcalloc(len, 1);
    +    strcat(t, "ftp://");
    +    if (strcmp(ftpState->user, "anonymous")) {
    +	strcat(t, rfc1738_escape_part(ftpState->user));
    +	if (ftpState->password_url) {
    +	    strcat(t, ":");
    +	    strcat(t, rfc1738_escape_part(ftpState->password));
    +	}
    +	strcat(t, "@");
    +    }
    +    strcat(t, request->host);
    +    if (request->port != urlDefaultPort(PROTO_FTP))
    +	snprintf(&t[strlen(t)], len - strlen(t), ":%d", request->port);
    +    strcat(t, strBuf(request->urlpath));
    +    strcat(t, "/");
     }
     
     void
    @@ -1030,7 +1041,7 @@ ftpStart(FwdState * fwd)
         ftpState->data.fd = -1;
         ftpState->size = -1;
         ftpState->mdtm = -1;
    -    ftpState->flags.pasv_supported = 1;
    +    ftpState->flags.pasv_supported = !fwd->flags.ftp_pasv_failed;
         ftpState->flags.rest_supported = 1;
         ftpState->fwd = fwd;
         comm_add_close_handler(fd, ftpStateFree, ftpState);
    @@ -1083,6 +1094,7 @@ ftpWriteCommand(const char *buf, FtpStateData * ftpState)
     {
         debug(9, 5) ("ftpWriteCommand: %s\n", buf);
         safe_free(ftpState->ctrl.last_command);
    +    safe_free(ftpState->ctrl.last_reply);
         ftpState->ctrl.last_command = xstrdup(buf);
         comm_write(ftpState->ctrl.fd,
     	xstrdup(buf),
    @@ -1097,8 +1109,6 @@ static void
     ftpWriteCommandCallback(int fd, char *bufnotused, size_t size, int errflag, void *data)
     {
         FtpStateData *ftpState = data;
    -    StoreEntry *entry = ftpState->entry;
    -    ErrorState *err;
         debug(9, 7) ("ftpWriteCommandCallback: wrote %d bytes\n", size);
         if (size > 0) {
     	fd_bytes(fd, size, FD_WRITE);
    @@ -1109,13 +1119,9 @@ ftpWriteCommandCallback(int fd, char *bufnotused, size_t size, int errflag, void
     	return;
         if (errflag) {
     	debug(50, 1) ("ftpWriteCommandCallback: FD %d: %s\n", fd, xstrerror());
    -	if (entry->mem_obj->inmem_hi == 0) {
    -	    err = errorCon(ERR_WRITE_ERROR, HTTP_SERVICE_UNAVAILABLE);
    -	    err->xerrno = errno;
    -	    err->request = requestLink(ftpState->request);
    -	    errorAppendEntry(entry, err);
    -	}
    -	comm_close(ftpState->ctrl.fd);
    +	ftpFailed(ftpState, ERR_WRITE_ERROR);
    +	/* ftpFailed closes ctrl.fd and frees ftpState */
    +	return;
         }
     }
     
    @@ -1206,8 +1212,11 @@ ftpReadControlReply(int fd, void *data)
         FtpStateData *ftpState = data;
         StoreEntry *entry = ftpState->entry;
         int len;
    -    ErrorState *err;
         debug(9, 5) ("ftpReadControlReply\n");
    +    if (EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
    +	comm_close(ftpState->ctrl.fd);
    +	return;
    +    }
         assert(ftpState->ctrl.offset < ftpState->ctrl.size);
         Counter.syscalls.sock.reads++;
         len = read(fd,
    @@ -1224,26 +1233,17 @@ ftpReadControlReply(int fd, void *data)
     	if (ignoreErrno(errno)) {
     	    ftpScheduleReadControlReply(ftpState, 0);
     	} else {
    -	    if (entry->mem_obj->inmem_hi == 0) {
    -		err = errorCon(ERR_READ_ERROR, HTTP_INTERNAL_SERVER_ERROR);
    -		err->xerrno = errno;
    -		err->request = requestLink(ftpState->request);
    -		errorAppendEntry(entry, err);
    -	    }
    -	    comm_close(ftpState->ctrl.fd);
    +	    ftpFailed(ftpState, ERR_READ_ERROR);
    +	    /* ftpFailed closes ctrl.fd and frees ftpState */
    +	    return;
     	}
     	return;
         }
         if (len == 0) {
     	if (entry->store_status == STORE_PENDING) {
    -	    storeReleaseRequest(entry);
    -	    if (entry->mem_obj->inmem_hi == 0) {
    -		err = errorCon(ERR_FTP_FAILURE, HTTP_INTERNAL_SERVER_ERROR);
    -		err->xerrno = 0;
    -		err->request = requestLink(ftpState->request);
    -		err->ftp_server_msg = ftpState->ctrl.message;
    -		errorAppendEntry(entry, err);
    -	    }
    +	    ftpFailed(ftpState, ERR_FTP_FAILURE);
    +	    /* ftpFailed closes ctrl.fd and frees ftpState */
    +	    return;
     	}
     	comm_close(ftpState->ctrl.fd);
     	return;
    @@ -1304,6 +1304,8 @@ ftpReadWelcome(FtpStateData * ftpState)
         debug(9, 3) ("ftpReadWelcome\n");
         if (ftpState->flags.pasv_only)
     	ftpState->login_att++;
    +    /* Don't retry if the FTP server accepted the connection */
    +    ftpState->fwd->flags.dont_retry = 1;
         if (code == 220) {
     	if (ftpState->ctrl.message) {
     	    if (strstr(ftpState->ctrl.message->key, "NetWare"))
    @@ -1721,6 +1723,9 @@ ftpReadPasv(FtpStateData * ftpState)
         debug(9, 5) ("ftpReadPasv: connecting to %s, port %d\n", junk, port);
         ftpState->data.port = port;
         ftpState->data.host = xstrdup(junk);
    +    safe_free(ftpState->ctrl.last_command);
    +    safe_free(ftpState->ctrl.last_reply);
    +    ftpState->ctrl.last_command = xstrdup("Connect to server data port");
         commConnectStart(fd, junk, port, ftpPasvCallback, ftpState);
     }
     
    @@ -1728,17 +1733,13 @@ static void
     ftpPasvCallback(int fd, int status, void *data)
     {
         FtpStateData *ftpState = data;
    -    request_t *request = ftpState->request;
    -    ErrorState *err;
         debug(9, 3) ("ftpPasvCallback\n");
         if (status != COMM_OK) {
    -	err = errorCon(ERR_CONNECT_FAIL, HTTP_SERVICE_UNAVAILABLE);
    -	err->xerrno = errno;
    -	err->host = xstrdup(ftpState->data.host);
    -	err->port = ftpState->data.port;
    -	err->request = requestLink(request);
    -	errorAppendEntry(ftpState->entry, err);
    -	comm_close(ftpState->ctrl.fd);
    +	debug(9, 2) ("ftpPasvCallback: failed to connect. Retrying without PASV.\n");
    +	ftpState->fwd->flags.dont_retry = 0;	/* this is a retryable error */
    +	ftpState->fwd->flags.ftp_pasv_failed = 1;
    +	ftpFailed(ftpState, ERR_NONE);
    +	/* ftpFailed closes ctrl.fd and frees ftpState */
     	return;
         }
         ftpRestOrList(ftpState);
    @@ -1868,7 +1869,6 @@ ftpAcceptDataConnection(int fd, void *data)
     static void
     ftpRestOrList(FtpStateData * ftpState)
     {
    -
         debug(9, 3) ("This is ftpRestOrList\n");
         if (ftpState->flags.put) {
     	debug(9, 3) ("ftpRestOrList: Sending STOR request...\n");
    @@ -1889,10 +1889,20 @@ ftpRestOrList(FtpStateData * ftpState)
     static void
     ftpSendStor(FtpStateData * ftpState)
     {
    -    assert(ftpState->filepath != NULL);
    -    snprintf(cbuf, 1024, "STOR %s\r\n", ftpState->filepath);
    -    ftpWriteCommand(cbuf, ftpState);
    -    ftpState->state = SENT_STOR;
    +    if (ftpState->filepath != NULL) {
    +	/* Plain file upload */
    +	snprintf(cbuf, 1024, "STOR %s\r\n", ftpState->filepath);
    +	ftpWriteCommand(cbuf, ftpState);
    +	ftpState->state = SENT_STOR;
    +    } else if (httpHeaderGetInt(&ftpState->request->header, HDR_CONTENT_LENGTH) > 0) {
    +	/* File upload without a filename. Use STOU to generate one */
    +	snprintf(cbuf, 1024, "STOU\r\n");
    +	ftpWriteCommand(cbuf, ftpState);
    +	ftpState->state = SENT_STOR;
    +    } else {
    +	/* No file to transfer. Only create directories if needed */
    +	ftpSendReply(ftpState);
    +    }
     }
     
     static void
    @@ -1900,7 +1910,9 @@ ftpReadStor(FtpStateData * ftpState)
     {
         int code = ftpState->ctrl.replycode;
         debug(9, 3) ("This is ftpReadStor\n");
    -    if (code >= 100 && code < 200) {
    +    if (code == 125 || (code == 150 && ftpState->data.host)) {
    +	/* Begin data transfer */
    +	debug(9, 3) ("ftpReadStor: starting data transfer\n");
     	/*
     	 * Cancel the timeout on the Control socket, pumpStart will
     	 * establish one on the data socket.
    @@ -1909,15 +1921,17 @@ ftpReadStor(FtpStateData * ftpState)
     	ftpPutStart(ftpState);
     	debug(9, 3) ("ftpReadStor: writing data channel\n");
     	ftpState->state = WRITING_DATA;
    -    } else if (code == 553) {
    -	/* directory does not exist, have to create, sigh */
    -#if WORK_IN_PROGRESS
    -	ftpTraverseDirectory(ftpState);
    -#endif
    -	ftpSendReply(ftpState);
    +    } else if (code == 150) {
    +	/* Accept data channel */
    +	debug(9, 3) ("ftpReadStor: accepting data channel\n");
    +	commSetSelect(ftpState->data.fd,
    +	    COMM_SELECT_READ,
    +	    ftpAcceptDataConnection,
    +	    ftpState,
    +	    0);
         } else {
    -	debug(9, 3) ("ftpReadStor: that's all folks\n");
    -	ftpSendReply(ftpState);
    +	debug(9, 3) ("ftpReadStor: Unexpected reply code %d\n", code);
    +	ftpFail(ftpState);
         }
     }
     
    @@ -2099,13 +2113,22 @@ ftpReadTransferDone(FtpStateData * ftpState)
     {
         int code = ftpState->ctrl.replycode;
         debug(9, 3) ("This is ftpReadTransferDone\n");
    -    if (code != 226) {
    +    if (code == 226) {
    +	/* Connection closed; retrieval done. */
    +	if (ftpState->flags.html_header_sent)
    +	    ftpListingFinish(ftpState);
    +	if (!ftpState->flags.put) {
    +	    storeTimestampsSet(ftpState->entry);
    +	    fwdComplete(ftpState->fwd);
    +	}
    +	ftpDataTransferDone(ftpState);
    +    } else {			/* != 226 */
     	debug(9, 1) ("ftpReadTransferDone: Got code %d after reading data\n",
     	    code);
    -	debug(9, 1) ("--> releasing '%s'\n", storeUrl(ftpState->entry));
    -	storeReleaseRequest(ftpState->entry);
    +	ftpFailed(ftpState, ERR_FTP_FAILURE);
    +	/* ftpFailed closes ctrl.fd and frees ftpState */
    +	return;
         }
    -    ftpDataTransferDone(ftpState);
     }
     
     static void
    @@ -2204,7 +2227,6 @@ ftpHackShortcut(FtpStateData * ftpState, FTPSM * nextState)
     static void
     ftpFail(FtpStateData * ftpState)
     {
    -    ErrorState *err;
         debug(9, 3) ("ftpFail\n");
         /* Try the / hack to support "Netscape" FTP URL's for retreiving files */
         if (!ftpState->flags.isdir &&	/* Not a directory */
    @@ -2235,45 +2257,80 @@ ftpFail(FtpStateData * ftpState)
     	    break;
     	}
         }
    +    ftpFailed(ftpState, ERR_NONE);
    +    /* ftpFailed closes ctrl.fd and frees ftpState */
    +}
    +
    +static void
    +ftpFailed(FtpStateData * ftpState, err_type error)
    +{
    +    StoreEntry *entry = ftpState->entry;
    +    if (entry->mem_obj->inmem_hi == 0)
    +	ftpFailedErrorMessage(ftpState, error);
    +    if (ftpState->data.fd > -1) {
    +	comm_close(ftpState->data.fd);
    +	ftpState->data.fd = -1;
    +    }
    +    comm_close(ftpState->ctrl.fd);
    +}
    +
    +static void
    +ftpFailedErrorMessage(FtpStateData * ftpState, err_type error)
    +{
    +    ErrorState *err;
    +    char *command, *reply;
         /* Translate FTP errors into HTTP errors */
         err = NULL;
    -    switch (ftpState->state) {
    -    case SENT_USER:
    -    case SENT_PASS:
    -	if (ftpState->ctrl.replycode > 500)
    -	    err = errorCon(ERR_FTP_FORBIDDEN, HTTP_FORBIDDEN);
    -	else if (ftpState->ctrl.replycode == 421)
    -	    err = errorCon(ERR_FTP_UNAVAILABLE, HTTP_SERVICE_UNAVAILABLE);
    +    switch (error) {
    +    case ERR_NONE:
    +	switch (ftpState->state) {
    +	case SENT_USER:
    +	case SENT_PASS:
    +	    if (ftpState->ctrl.replycode > 500)
    +		err = errorCon(ERR_FTP_FORBIDDEN, HTTP_FORBIDDEN);
    +	    else if (ftpState->ctrl.replycode == 421)
    +		err = errorCon(ERR_FTP_UNAVAILABLE, HTTP_SERVICE_UNAVAILABLE);
    +	    break;
    +	case SENT_CWD:
    +	case SENT_RETR:
    +	    if (ftpState->ctrl.replycode == 550)
    +		err = errorCon(ERR_FTP_NOT_FOUND, HTTP_NOT_FOUND);
    +	    break;
    +	default:
    +	    break;
    +	}
     	break;
    -    case SENT_CWD:
    -    case SENT_RETR:
    -	if (ftpState->ctrl.replycode == 550)
    -	    err = errorCon(ERR_FTP_NOT_FOUND, HTTP_NOT_FOUND);
    +    case ERR_READ_TIMEOUT:
    +	err = errorCon(error, HTTP_GATEWAY_TIMEOUT);
     	break;
         default:
    +	err = errorCon(error, HTTP_BAD_GATEWAY);
     	break;
         }
         if (err == NULL)
     	err = errorCon(ERR_FTP_FAILURE, HTTP_BAD_GATEWAY);
    +    err->xerrno = errno;
         err->request = requestLink(ftpState->request);
    -    err->ftp_server_msg = ftpState->ctrl.message;
    +    err->ftp.server_msg = ftpState->ctrl.message;
    +    ftpState->ctrl.message = NULL;
         if (ftpState->old_request)
    -	err->ftp.request = ftpState->old_request;
    +	command = ftpState->old_request;
         else
    -	err->ftp.request = ftpState->ctrl.last_command;
    -    if (err->ftp.request) {
    -	if (!strncmp(err->ftp.request, "PASS", 4))
    -	    err->ftp.request = "PASS ";
    -    }
    +	command = ftpState->ctrl.last_command;
    +    if (command && strncmp(command, "PASS", 4) == 0)
    +	command = "PASS ";
         if (ftpState->old_reply)
    -	err->ftp.reply = ftpState->old_reply;
    +	reply = ftpState->old_reply;
         else
    -	err->ftp.reply = ftpState->ctrl.last_reply;
    -    errorAppendEntry(ftpState->entry, err);
    -    comm_close(ftpState->ctrl.fd);
    +	reply = ftpState->ctrl.last_reply;
    +    if (command)
    +	err->ftp.request = xstrdup(command);
    +    if (reply)
    +	err->ftp.reply = xstrdup(reply);
    +    fwdFail(ftpState->fwd, err);
     }
     
    -void
    +static void
     ftpPumpClosedData(int data_fd, void *data)
     {
         FtpStateData *ftpState = data;
    @@ -2329,6 +2386,9 @@ ftpSendReply(FtpStateData * ftpState)
         if (code == 226) {
     	err_code = (ftpState->mdtm > 0) ? ERR_FTP_PUT_MODIFIED : ERR_FTP_PUT_CREATED;
     	http_code = (ftpState->mdtm > 0) ? HTTP_ACCEPTED : HTTP_CREATED;
    +    } else if (code == 227) {
    +	err_code = ERR_FTP_PUT_CREATED;
    +	http_code = HTTP_CREATED;
         } else {
     	err_code = ERR_FTP_PUT_ERROR;
     	http_code = HTTP_INTERNAL_SERVER_ERROR;
    @@ -2336,16 +2396,16 @@ ftpSendReply(FtpStateData * ftpState)
         err = errorCon(err_code, http_code);
         err->request = requestLink(ftpState->request);
         if (ftpState->old_request)
    -	err->ftp.request = ftpState->old_request;
    +	err->ftp.request = xstrdup(ftpState->old_request);
         else
    -	err->ftp.request = ftpState->ctrl.last_command;
    +	err->ftp.request = xstrdup(ftpState->ctrl.last_command);
         if (ftpState->old_reply)
    -	err->ftp.reply = ftpState->old_reply;
    +	err->ftp.reply = xstrdup(ftpState->old_reply);
         else
    -	err->ftp.reply = ftpState->ctrl.last_reply;
    +	err->ftp.reply = xstrdup(ftpState->ctrl.last_reply);
         errorAppendEntry(ftpState->entry, err);
         storeBufferFlush(ftpState->entry);
    -    comm_close(ftpState->ctrl.fd);
    +    ftpSendQuit(ftpState);
     }
     
     static void
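    (Illustration, not part of the patch: the ftp.cc hunks above replace the per-caller ErrorState
    construction with the new ftpFailed()/ftpFailedErrorMessage() helpers. A minimal sketch of the
    caller-side convention, using a hypothetical handler name; ftpFailed() builds an error reply only
    if nothing has been sent to the client yet, then closes the data and control sockets, which in
    turn frees ftpState.)

        static void
        ftpSomeHandler(FtpStateData * ftpState, int len)
        {
            if (len < 0 && !ignoreErrno(errno)) {
                ftpFailed(ftpState, ERR_READ_ERROR);
                /* ftpFailed closes ctrl.fd and frees ftpState; do not touch it again */
                return;
            }
            /* ... normal processing continues here ... */
        }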
    diff --git a/src/gopher.cc b/src/gopher.cc
    index 9ab5b63363..4b2697eee1 100644
    --- a/src/gopher.cc
    +++ b/src/gopher.cc
    @@ -1,7 +1,7 @@
     
     
     /*
    - * $Id: gopher.cc,v 1.150 1999/01/31 15:58:54 wessels Exp $
    + * $Id: gopher.cc,v 1.151 1999/10/04 05:05:13 wessels Exp $
      *
      * DEBUG: section 10    Gopher
      * AUTHOR: Harvest Derived
    @@ -430,7 +430,7 @@ gopherToHTML(GopherStateData * gopherState, char *inbuf, int len)
     				port[0] = 0;	/* 0 means none */
     			}
     			/* escape a selector here */
    -			escaped_selector = xstrdup(rfc1738_escape(selector));
    +			escaped_selector = xstrdup(rfc1738_escape_part(selector));
     
     			switch (gtype) {
     			case GOPHER_DIRECTORY:
    diff --git a/src/htcp.cc b/src/htcp.cc
    index e64a1a9c64..e51fd604a7 100644
    --- a/src/htcp.cc
    +++ b/src/htcp.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: htcp.cc,v 1.27 1999/06/10 06:10:30 wessels Exp $
    + * $Id: htcp.cc,v 1.28 1999/10/04 05:05:14 wessels Exp $
      *
      * DEBUG: section 31    Hypertext Caching Protocol
      * AUTHOR: Duane Wesssels
    @@ -365,16 +365,22 @@ htcpBuildPacket(htcpStuff * stuff, ssize_t * len)
         htcpHeader hdr;
         char *buf = xcalloc(buflen, 1);
         /* skip the header -- we don't know the overall length */
    -    if (buflen < hdr_sz)
    +    if (buflen < hdr_sz) {
    +	xfree(buf);
     	return NULL;
    +    }
         off += hdr_sz;
         s = htcpBuildData(buf + off, buflen - off, stuff);
    -    if (s < 0)
    +    if (s < 0) {
    +	xfree(buf);
     	return NULL;
    +    }
         off += s;
         s = htcpBuildAuth(buf + off, buflen - off);
    -    if (s < 0)
    +    if (s < 0) {
    +	xfree(buf);
     	return NULL;
    +    }
         off += s;
         hdr.length = htons((u_short) off);
         hdr.major = 0;
    @@ -412,7 +418,7 @@ htcpFreeSpecifier(htcpSpecifier * s)
         safe_free(s->uri);
         safe_free(s->version);
         safe_free(s->req_hdrs);
    -    xfree(s);
    +    memFree(s, MEM_HTCP_SPECIFIER);
     }
     
     static void
    @@ -421,7 +427,7 @@ htcpFreeDetail(htcpDetail * d)
         safe_free(d->resp_hdrs);
         safe_free(d->entity_hdrs);
         safe_free(d->cache_hdrs);
    -    xfree(d);
    +    memFree(d, MEM_HTCP_DETAIL);
     }
     
     static int
    @@ -454,7 +460,7 @@ htcpUnpackCountstr(char *buf, int sz, char **str)
     static htcpSpecifier *
     htcpUnpackSpecifier(char *buf, int sz)
     {
    -    htcpSpecifier *s = xcalloc(1, sizeof(htcpSpecifier));
    +    htcpSpecifier *s = memAllocate(MEM_HTCP_SPECIFIER);
         int o;
         debug(31, 3) ("htcpUnpackSpecifier: %d bytes\n", (int) sz);
         o = htcpUnpackCountstr(buf, sz, &s->method);
    @@ -496,7 +502,7 @@ htcpUnpackSpecifier(char *buf, int sz)
     static htcpDetail *
     htcpUnpackDetail(char *buf, int sz)
     {
    -    htcpDetail *d = xcalloc(1, sizeof(htcpDetail));
    +    htcpDetail *d = memAllocate(MEM_HTCP_DETAIL);
         int o;
         debug(31, 3) ("htcpUnpackDetail: %d bytes\n", (int) sz);
         o = htcpUnpackCountstr(buf, sz, &d->resp_hdrs);
    @@ -541,6 +547,7 @@ htcpTstReply(htcpDataHeader * dhdr, StoreEntry * e, htcpSpecifier * spec, struct
         int hops = 0;
         int samp = 0;
         char cto_buf[128];
    +    memset(&stuff, '\0', sizeof(stuff));
         stuff.op = HTCP_TST;
         stuff.rr = RR_RESPONSE;
         stuff.f1 = 0;
    @@ -667,6 +674,7 @@ htcpHandleTstResponse(htcpDataHeader * hdr, char *buf, int sz, struct sockaddr_i
         key = queried_keys[htcpReply.msg_id % N_QUERIED_KEYS];
         debug(31, 3) ("htcpHandleTstResponse: key (%p) %s\n", key, storeKeyText(key));
         neighborsHtcpReply(key, &htcpReply, from);
    +    httpHeaderClean(&htcpReply.hdr);
         if (d)
     	htcpFreeDetail(d);
     }
    @@ -847,6 +855,8 @@ htcpInit(void)
         } else {
     	htcpOutSocket = htcpInSocket;
         }
    +    memDataInit(MEM_HTCP_SPECIFIER, "htcpSpecifier", sizeof(htcpSpecifier), 0);
    +    memDataInit(MEM_HTCP_DETAIL, "htcpDetail", sizeof(htcpDetail), 0);
     }
     
     void
    @@ -879,6 +889,7 @@ htcpQuery(StoreEntry * e, request_t * req, peer * p)
         packerClean(&pa);
         stuff.S.req_hdrs = mb.buf;
         pkt = htcpBuildPacket(&stuff, &pktlen);
    +    memBufClean(&mb);
         if (pkt == NULL) {
     	debug(31, 0) ("htcpQuery: htcpBuildPacket() failed\n");
     	return;
    diff --git a/src/http.cc b/src/http.cc
    index ca7981eb2e..df354804ae 100644
    --- a/src/http.cc
    +++ b/src/http.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: http.cc,v 1.353 1999/09/29 00:22:14 wessels Exp $
    + * $Id: http.cc,v 1.354 1999/10/04 05:05:15 wessels Exp $
      *
      * DEBUG: section 11    Hypertext Transfer Protocol (HTTP)
      * AUTHOR: Harvest Derived
    @@ -291,81 +291,80 @@ httpProcessReplyHeader(HttpStateData * httpState, const char *buf, int size)
         char *t = NULL;
         StoreEntry *entry = httpState->entry;
         int room;
    -    int hdr_len;
    +    size_t hdr_len;
         HttpReply *reply = entry->mem_obj->reply;
    +    Ctx ctx;
         debug(11, 3) ("httpProcessReplyHeader: key '%s'\n",
     	storeKeyText(entry->key));
         if (httpState->reply_hdr == NULL)
     	httpState->reply_hdr = memAllocate(MEM_8K_BUF);
    -    if (httpState->reply_hdr_state == 0) {
    -	hdr_len = strlen(httpState->reply_hdr);
    -	room = 8191 - hdr_len;
    -	strncat(httpState->reply_hdr, buf, room < size ? room : size);
    -	hdr_len += room < size ? room : size;
    -	if (hdr_len > 4 && strncmp(httpState->reply_hdr, "HTTP/", 5)) {
    -	    debug(11, 3) ("httpProcessReplyHeader: Non-HTTP-compliant header: '%s'\n", httpState->reply_hdr);
    -	    httpState->reply_hdr_state += 2;
    -	    reply->sline.status = HTTP_INVALID_HEADER;
    -	    return;
    -	}
    -	t = httpState->reply_hdr + hdr_len;
    -	/* headers can be incomplete only if object still arriving */
    -	if (!httpState->eof) {
    -	    size_t k = headersEnd(httpState->reply_hdr, 8192);
    -	    if (0 == k)
    -		return;		/* headers not complete */
    -	    t = httpState->reply_hdr + k;
    -	}
    -	*t = '\0';
    -	httpState->reply_hdr_state++;
    -    }
    -    if (httpState->reply_hdr_state == 1) {
    -	const Ctx ctx = ctx_enter(entry->mem_obj->url);
    -	httpState->reply_hdr_state++;
    -	debug(11, 9) ("GOT HTTP REPLY HDR:\n---------\n%s\n----------\n",
    -	    httpState->reply_hdr);
    -	/* Parse headers into reply structure */
    -	/* what happens if we fail to parse here? */
    -	httpReplyParse(reply, httpState->reply_hdr);	/* httpState->eof); */
    -	storeTimestampsSet(entry);
    -	/* Check if object is cacheable or not based on reply code */
    -	debug(11, 3) ("httpProcessReplyHeader: HTTP CODE: %d\n", reply->sline.status);
    -	if (neighbors_do_private_keys)
    -	    httpMaybeRemovePublic(entry, reply->sline.status);
    -	switch (httpCachableReply(httpState)) {
    -	case 1:
    -	    httpMakePublic(entry);
    -	    break;
    -	case 0:
    -	    httpMakePrivate(entry);
    -	    break;
    -	case -1:
    -	    httpCacheNegatively(entry);
    -	    break;
    -	default:
    -	    assert(0);
    -	    break;
    -	}
    -	if (reply->cache_control) {
    -	    if (EBIT_TEST(reply->cache_control->mask, CC_PROXY_REVALIDATE))
    -		EBIT_SET(entry->flags, ENTRY_REVALIDATE);
    -	    else if (EBIT_TEST(reply->cache_control->mask, CC_MUST_REVALIDATE))
    -		EBIT_SET(entry->flags, ENTRY_REVALIDATE);
    -	}
    -	if (httpState->flags.keepalive)
    -	    if (httpState->peer)
    -		httpState->peer->stats.n_keepalives_sent++;
    -	if (reply->keep_alive)
    -	    if (httpState->peer)
    -		httpState->peer->stats.n_keepalives_recv++;
    -	ctx_exit(ctx);
    -	if (reply->date > -1 && !httpState->peer) {
    -	    int skew = abs(reply->date - squid_curtime);
    -	    if (skew > 86400)
    -		debug(11, 3) ("%s's clock is skewed by %d seconds!\n",
    -		    httpState->request->host, skew);
    -	}
    +    assert(httpState->reply_hdr_state == 0);
    +    hdr_len = strlen(httpState->reply_hdr);
    +    room = 8191 - hdr_len;
    +    strncat(httpState->reply_hdr, buf, room < size ? room : size);
    +    hdr_len += room < size ? room : size;
    +    if (hdr_len > 4 && strncmp(httpState->reply_hdr, "HTTP/", 5)) {
    +	debug(11, 3) ("httpProcessReplyHeader: Non-HTTP-compliant header: '%s'\n", httpState->reply_hdr);
    +	httpState->reply_hdr_state += 2;
    +	reply->sline.status = HTTP_INVALID_HEADER;
    +	return;
    +    }
    +    t = httpState->reply_hdr + hdr_len;
    +    /* headers can be incomplete only if object still arriving */
    +    if (!httpState->eof) {
    +	size_t k = headersEnd(httpState->reply_hdr, 8192);
    +	if (0 == k)
    +	    return;		/* headers not complete */
    +	t = httpState->reply_hdr + k;
    +    }
    +    *t = '\0';
    +    httpState->reply_hdr_state++;
    +    assert(httpState->reply_hdr_state == 1);
    +    ctx = ctx_enter(entry->mem_obj->url);
    +    httpState->reply_hdr_state++;
    +    debug(11, 9) ("GOT HTTP REPLY HDR:\n---------\n%s\n----------\n",
    +	httpState->reply_hdr);
    +    /* Parse headers into reply structure */
    +    /* what happens if we fail to parse here? */
    +    httpReplyParse(reply, httpState->reply_hdr, hdr_len);
    +    storeTimestampsSet(entry);
    +    /* Check if object is cacheable or not based on reply code */
    +    debug(11, 3) ("httpProcessReplyHeader: HTTP CODE: %d\n", reply->sline.status);
    +    if (neighbors_do_private_keys)
    +	httpMaybeRemovePublic(entry, reply->sline.status);
    +    switch (httpCachableReply(httpState)) {
    +    case 1:
    +	httpMakePublic(entry);
    +	break;
    +    case 0:
    +	httpMakePrivate(entry);
    +	break;
    +    case -1:
    +	httpCacheNegatively(entry);
    +	break;
    +    default:
    +	assert(0);
    +	break;
         }
    +    if (reply->cache_control) {
    +	if (EBIT_TEST(reply->cache_control->mask, CC_PROXY_REVALIDATE))
    +	    EBIT_SET(entry->flags, ENTRY_REVALIDATE);
    +	else if (EBIT_TEST(reply->cache_control->mask, CC_MUST_REVALIDATE))
    +	    EBIT_SET(entry->flags, ENTRY_REVALIDATE);
    +    }
    +    if (httpState->flags.keepalive)
    +	if (httpState->peer)
    +	    httpState->peer->stats.n_keepalives_sent++;
    +    if (reply->keep_alive)
    +	if (httpState->peer)
    +	    httpState->peer->stats.n_keepalives_recv++;
    +    if (reply->date > -1 && !httpState->peer) {
    +	int skew = abs(reply->date - squid_curtime);
    +	if (skew > 86400)
    +	    debug(11, 3) ("%s's clock is skewed by %d seconds!\n",
    +		httpState->request->host, skew);
    +    }
    +    ctx_exit(ctx);
     }
     
     static int
    @@ -616,8 +615,8 @@ httpBuildRequestHeader(request_t * request,
         HttpHeaderPos pos = HttpHeaderInitPos;
         httpHeaderInit(hdr_out, hoRequest);
         /* append our IMS header */
    -    if (entry && entry->lastmod > -1 && request->method == METHOD_GET)
    -	httpHeaderPutTime(hdr_out, HDR_IF_MODIFIED_SINCE, entry->lastmod);
    +    if (request->lastmod > -1 && request->method == METHOD_GET)
    +	httpHeaderPutTime(hdr_out, HDR_IF_MODIFIED_SINCE, request->lastmod);
     
         /* decide if we want to do Ranges ourselves 
          * (and fetch the whole object now)
    @@ -764,7 +763,7 @@ httpBuildRequestHeader(request_t * request,
     
     /* build request prefix and append it to a given MemBuf; 
      * return the length of the prefix */
    -size_t
    +mb_size_t
     httpBuildRequestPrefix(request_t * request,
         request_t * orig_request,
         StoreEntry * entry,
    @@ -863,6 +862,7 @@ httpStart(FwdState * fwd)
     	xstrncpy(proxy_req->host, httpState->peer->host, SQUIDHOSTNAMELEN);
     	proxy_req->port = httpState->peer->http_port;
     	proxy_req->flags = orig_req->flags;
    +	proxy_req->lastmod = orig_req->lastmod;
     	httpState->request = requestLink(proxy_req);
     	httpState->orig_request = requestLink(orig_req);
     	proxy_req->flags.proxying = 1;
    diff --git a/src/ipc.cc b/src/ipc.cc
    index 3d79039f58..d2f5018c73 100644
    --- a/src/ipc.cc
    +++ b/src/ipc.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: ipc.cc,v 1.15 1998/11/20 06:08:01 wessels Exp $
    + * $Id: ipc.cc,v 1.16 1999/10/04 05:05:16 wessels Exp $
      *
      * DEBUG: section 54    Interprocess Communication
      * AUTHOR: Duane Wessels
    @@ -182,15 +182,16 @@ ipcCreate(int type, const char *prog, char *const args[], const char *name, int
     	}
     	memset(hello_buf, '\0', HELLO_BUF_SZ);
     	if (type == IPC_UDP_SOCKET)
    -	    x = recv(prfd, hello_buf, HELLO_BUF_SZ, 0);
    +	    x = recv(prfd, hello_buf, HELLO_BUF_SZ - 1, 0);
     	else
    -	    x = read(prfd, hello_buf, HELLO_BUF_SZ);
    +	    x = read(prfd, hello_buf, HELLO_BUF_SZ - 1);
     	if (x < 0) {
     	    debug(50, 0) ("ipcCreate: PARENT: hello read test failed\n");
     	    debug(50, 0) ("--> read: %s\n", xstrerror());
     	    return ipcCloseAllFD(prfd, pwfd, crfd, cwfd);
     	} else if (strcmp(hello_buf, hello_string)) {
     	    debug(54, 0) ("ipcCreate: PARENT: hello read test failed\n");
    +	    debug(54, 0) ("--> read returned %d\n", x);
     	    debug(54, 0) ("--> got '%s'\n", rfc1738_escape(hello_buf));
     	    return ipcCloseAllFD(prfd, pwfd, crfd, cwfd);
     	}
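    (Note on the ipc.cc hunk above: reading at most HELLO_BUF_SZ - 1 bytes preserves the terminating
    NUL left by the preceding memset(), so the strcmp() against hello_string cannot read past the
    buffer. A minimal sketch of the intended pattern:)

        memset(hello_buf, '\0', HELLO_BUF_SZ);
        /* leave room for the final NUL so hello_buf is always a valid C string */
        x = read(prfd, hello_buf, HELLO_BUF_SZ - 1);
        if (x > 0 && strcmp(hello_buf, hello_string) == 0) {
            /* child process is alive and answered correctly */
        }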
    diff --git a/src/main.cc b/src/main.cc
    index e880a43e5b..8a88f5bfbe 100644
    --- a/src/main.cc
    +++ b/src/main.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: main.cc,v 1.304 1999/08/02 06:18:38 wessels Exp $
    + * $Id: main.cc,v 1.305 1999/10/04 05:05:17 wessels Exp $
      *
      * DEBUG: section 1     Startup and Main Loop
      * AUTHOR: Harvest Derived
    @@ -72,6 +72,10 @@ extern void log_trace_init(char *);
     static EVH SquidShutdown;
     static void mainSetCwd(void);
     
    +#if TEST_ACCESS
    +#include "test_access.c"
    +#endif
    +
     static void
     usage(void)
     {
    @@ -482,7 +486,6 @@ mainInitialize(void)
     	unlinkdInit();
     	urlInitialize();
     	cachemgrInit();
    -	eventInit();		/* eventInit() before statInit() */
     	statInit();
     	storeInit();
     	mainSetCwd();
    @@ -588,12 +591,21 @@ main(int argc, char **argv)
     	leakInit();
     #endif
     	memInit();		/* memInit is required for config parsing */
    +	eventInit();		/* eventInit() is required for config parsing */
     	parse_err = parseConfigFile(ConfigFile);
     
     	if (opt_parse_cfg_only)
     	    return parse_err;
         }
     
    +#if TEST_ACCESS
    +    comm_init();
    +    comm_select_init();
    +    mainInitialize();
    +    test_access();
    +    return 0;
    +#endif
    +
         /* send signal to running copy and exit */
         if (opt_send_signal != -1) {
     	sendSignal();
    diff --git a/src/neighbors.cc b/src/neighbors.cc
    index 30307afa9a..dce2842163 100644
    --- a/src/neighbors.cc
    +++ b/src/neighbors.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: neighbors.cc,v 1.275 1999/06/16 22:10:40 wessels Exp $
    + * $Id: neighbors.cc,v 1.276 1999/10/04 05:05:19 wessels Exp $
      *
      * DEBUG: section 15    Neighbor Routines
      * AUTHOR: Harvest Derived
    @@ -102,7 +102,7 @@ neighborType(const peer * p, const request_t * request)
     {
         const struct _domain_type *d = NULL;
         for (d = p->typelist; d; d = d->next) {
    -	if (matchDomainName(d->domain, request->host))
    +	if (0 == matchDomainName(d->domain, request->host))
     	    if (d->type != PEER_NONE)
     		return d->type;
         }
    @@ -136,7 +136,7 @@ peerAllowedToUse(const peer * p, request_t * request)
     	return do_ping;
         do_ping = 0;
         for (d = p->peer_domain; d; d = d->next) {
    -	if (matchDomainName(d->domain, request->host)) {
    +	if (0 == matchDomainName(d->domain, request->host)) {
     	    do_ping = d->do_ping;
     	    break;
     	}
    @@ -1306,7 +1306,7 @@ neighborsHtcpReply(const cache_key * key, htcpReplyData * htcp, const struct soc
     	neighborCountIgnored(p);
     	return;
         }
    -    debug(15, 1) ("neighborsHtcpReply: e = %p\n", e);
    +    debug(15, 3) ("neighborsHtcpReply: e = %p\n", e);
         mem->ping_reply_callback(p, ntype, PROTO_HTCP, htcp, mem->ircb_data);
     }
     #endif
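    (Note on the neighbors.cc hunks above: the call sites now compare the result of matchDomainName()
    against 0, i.e. the function is treated as strcmp()-like, with 0 meaning the domain matched. A
    minimal sketch of the assumed convention:)

        if (0 == matchDomainName(d->domain, request->host)) {
            /* d->domain matches request->host */
        } else {
            /* no match */
        }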
    diff --git a/src/net_db.cc b/src/net_db.cc
    index c4135c877f..e6b2b95676 100644
    --- a/src/net_db.cc
    +++ b/src/net_db.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: net_db.cc,v 1.139 1999/05/04 21:58:29 wessels Exp $
    + * $Id: net_db.cc,v 1.140 1999/10/04 05:05:20 wessels Exp $
      *
      * DEBUG: section 38    Network Measurement Database
      * AUTHOR: Duane Wessels
    @@ -528,7 +528,7 @@ netdbExchangeHandleReply(void *data, char *buf, ssize_t size)
     	    debug(38, 5) ("netdbExchangeHandleReply: hdr_sz = %d\n", hdr_sz);
     	    rep = ex->e->mem_obj->reply;
     	    if (0 == rep->sline.status)
    -		httpReplyParse(rep, buf);
    +		httpReplyParse(rep, buf, hdr_sz);
     	    debug(38, 3) ("netdbExchangeHandleReply: reply status %d\n",
     		rep->sline.status);
     	    if (HTTP_OK != rep->sline.status) {
    @@ -973,6 +973,8 @@ netdbExchangeStart(void *data)
         storeClientCopy(ex->e, ex->seen, ex->used, ex->buf_sz,
     	ex->buf, netdbExchangeHandleReply, ex);
         ex->r->flags.loopdetect = 1;	/* cheat! -- force direct */
    +    if (p->login)
    +	xstrncpy(ex->r->login, p->login, MAX_LOGIN_SZ);
         fwdStart(-1, ex->e, ex->r, no_addr, no_addr);
     #endif
     }
    diff --git a/src/peer_digest.cc b/src/peer_digest.cc
    index 14204b1cd9..c6ff44891b 100644
    --- a/src/peer_digest.cc
    +++ b/src/peer_digest.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: peer_digest.cc,v 1.70 1999/01/29 21:28:17 wessels Exp $
    + * $Id: peer_digest.cc,v 1.71 1999/10/04 05:05:20 wessels Exp $
      *
      * DEBUG: section 72    Peer Digest Routines
      * AUTHOR: Alex Rousskov
    @@ -285,7 +285,8 @@ peerDigestRequest(PeerDigest * pd)
         assert(!req->header.len);
         httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr);
         httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html");
    -
    +    if (p->login)
    +	xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
         /* create fetch state structure */
         fetch = memAllocate(MEM_DIGEST_FETCH_STATE);
         cbdataAdd(fetch, memFree, MEM_DIGEST_FETCH_STATE);
    @@ -330,17 +331,18 @@ peerDigestFetchReply(void *data, char *buf, ssize_t size)
     {
         DigestFetchState *fetch = data;
         PeerDigest *pd = fetch->pd;
    +    size_t hdr_size;
         assert(pd && buf);
         assert(!fetch->offset);
     
         if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
     	return;
     
    -    if (headersEnd(buf, size)) {
    +    if ((hdr_size = headersEnd(buf, size))) {
     	http_status status;
     	HttpReply *reply = fetch->entry->mem_obj->reply;
     	assert(reply);
    -	httpReplyParse(reply, buf);
    +	httpReplyParse(reply, buf, hdr_size);
     	status = reply->sline.status;
     	debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %d (%+d)\n",
     	    strBuf(pd->host), status,
    @@ -410,7 +412,7 @@ peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
         if ((hdr_size = headersEnd(buf, size))) {
     	assert(fetch->entry->mem_obj->reply);
     	if (!fetch->entry->mem_obj->reply->sline.status)
    -	    httpReplyParse(fetch->entry->mem_obj->reply, buf);
    +	    httpReplyParse(fetch->entry->mem_obj->reply, buf, hdr_size);
     	if (fetch->entry->mem_obj->reply->sline.status != HTTP_OK) {
     	    debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n",
     		strBuf(fetch->pd->host), fetch->entry->mem_obj->reply->sline.status);
    diff --git a/src/peer_select.cc b/src/peer_select.cc
    index 6a6386fa5b..18bdc78765 100644
    --- a/src/peer_select.cc
    +++ b/src/peer_select.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: peer_select.cc,v 1.100 1999/05/19 19:57:49 wessels Exp $
    + * $Id: peer_select.cc,v 1.101 1999/10/04 05:05:21 wessels Exp $
      *
      * DEBUG: section 44    Peer Selection Algorithm
      * AUTHOR: Duane Wessels
    @@ -52,7 +52,8 @@ const char *hier_strings[] =
         "SOURCE_FASTEST",
         "ROUNDROBIN_PARENT",
     #if USE_CACHE_DIGESTS
    -    "CACHE_DIGEST_HIT",
    +    "CD_PARENT_HIT",
    +    "CD_SIBLING_HIT",
     #endif
     #if USE_CARP
         "CARP",
    @@ -310,7 +311,10 @@ peerGetSomeNeighbor(ps_state * ps)
         }
     #if USE_CACHE_DIGESTS
         if ((p = neighborsDigestSelect(request, entry))) {
    -	code = CACHE_DIGEST_HIT;
    +	if (neighborType(p, request) == PEER_PARENT)
    +	    code = CD_PARENT_HIT;
    +	else
    +	    code = CD_SIBLING_HIT;
         } else
     #endif
     #if USE_CARP
    @@ -534,7 +538,6 @@ static void
     peerHandleHtcpReply(peer * p, peer_t type, htcpReplyData * htcp, void *data)
     {
         ps_state *psstate = data;
    -    request_t *request = psstate->request;
         debug(44, 3) ("peerHandleIcpReply: %s %s\n",
     	htcp->hit ? "HIT" : "MISS",
     	storeUrl(psstate->entry));
    diff --git a/src/protos.h b/src/protos.h
    index 763a4ca2ac..9106de4967 100644
    --- a/src/protos.h
    +++ b/src/protos.h
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: protos.h,v 1.346 1999/09/29 00:22:16 wessels Exp $
    + * $Id: protos.h,v 1.347 1999/10/04 05:05:22 wessels Exp $
      *
      *
      * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
    @@ -129,6 +129,7 @@ extern void clientdbUpdate(struct in_addr, log_type, protocol_t, size_t);
     extern int clientdbCutoffDenied(struct in_addr);
     extern void clientdbDump(StoreEntry *);
     extern void clientdbFreeMemory(void);
    +extern int clientdbEstablished(struct in_addr, int);
     
     extern void clientAccessCheck(void *);
     extern void clientAccessCheckDone(int, void *);
    @@ -283,13 +284,13 @@ extern int httpCachable(method_t);
     extern void httpStart(FwdState *);
     extern void httpParseReplyHeaders(const char *, http_reply *);
     extern void httpProcessReplyHeader(HttpStateData *, const char *, int);
    -extern size_t httpBuildRequestPrefix(request_t * request,
    +extern mb_size_t httpBuildRequestPrefix(request_t * request,
         request_t * orig_request,
         StoreEntry * entry,
         MemBuf * mb,
         int cfd,
         http_state_flags);
    -extern void httpAnonInitModule();
    +extern void httpAnonInitModule(void);
     extern int httpAnonHdrAllowed(http_hdr_type hdr_id);
     extern int httpAnonHdrDenied(http_hdr_type hdr_id);
     extern void httpBuildRequestHeader(request_t *, request_t *, StoreEntry *, HttpHeader *, int, http_state_flags);
    @@ -327,9 +328,9 @@ extern void httpBodySet(HttpBody * body, MemBuf * mb);
     extern void httpBodyPackInto(const HttpBody * body, Packer * p);
     
     /* Http Cache Control Header Field */
    -extern void httpHdrCcInitModule();
    -extern void httpHdrCcCleanModule();
    -extern HttpHdrCc *httpHdrCcCreate();
    +extern void httpHdrCcInitModule(void);
    +extern void httpHdrCcCleanModule(void);
    +extern HttpHdrCc *httpHdrCcCreate(void);
     extern HttpHdrCc *httpHdrCcParseCreate(const String * str);
     extern void httpHdrCcDestroy(HttpHdrCc * cc);
     extern HttpHdrCc *httpHdrCcDup(const HttpHdrCc * cc);
    @@ -349,17 +350,17 @@ extern void httpHdrRangePackInto(const HttpHdrRange * range, Packer * p);
     /* iterate through specs */
     extern HttpHdrRangeSpec *httpHdrRangeGetSpec(const HttpHdrRange * range, HttpHdrRangePos * pos);
     /* adjust specs after the length is known */
    -extern int httpHdrRangeCanonize(HttpHdrRange * range, size_t clen);
    +extern int httpHdrRangeCanonize(HttpHdrRange *, ssize_t);
     /* other */
     extern String httpHdrRangeBoundaryStr(clientHttpRequest * http);
     extern int httpHdrRangeIsComplex(const HttpHdrRange * range);
     extern int httpHdrRangeWillBeComplex(const HttpHdrRange * range);
    -extern size_t httpHdrRangeFirstOffset(const HttpHdrRange * range);
    -extern size_t httpHdrRangeLowestOffset(const HttpHdrRange * range, size_t size);
    +extern ssize_t httpHdrRangeFirstOffset(const HttpHdrRange * range);
    +extern ssize_t httpHdrRangeLowestOffset(const HttpHdrRange * range, ssize_t);
     
     
     /* Http Content Range Header Field */
    -extern HttpHdrContRange *httpHdrContRangeCreate();
    +extern HttpHdrContRange *httpHdrContRangeCreate(void);
     extern HttpHdrContRange *httpHdrContRangeParseCreate(const char *crange_spec);
     /* returns true if range is valid; inits HttpHdrContRange */
     extern int httpHdrContRangeParseInit(HttpHdrContRange * crange, const char *crange_spec);
    @@ -367,7 +368,7 @@ extern void httpHdrContRangeDestroy(HttpHdrContRange * crange);
     extern HttpHdrContRange *httpHdrContRangeDup(const HttpHdrContRange * crange);
     extern void httpHdrContRangePackInto(const HttpHdrContRange * crange, Packer * p);
     /* inits with given spec */
    -extern void httpHdrContRangeSet(HttpHdrContRange *, HttpHdrRangeSpec, size_t ent_len);
    +extern void httpHdrContRangeSet(HttpHdrContRange *, HttpHdrRangeSpec, ssize_t);
     
     /* Http Header Tools */
     extern HttpHeaderFieldInfo *httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs * attrs, int count);
    @@ -377,26 +378,25 @@ extern int httpHeaderIdByNameDef(const char *name, int name_len);
     extern void httpHeaderMaskInit(HttpHeaderMask * mask, int value);
     extern void httpHeaderCalcMask(HttpHeaderMask * mask, const int *enums, int count);
     extern int httpHeaderHasConnDir(const HttpHeader * hdr, const char *directive);
    -extern void httpHeaderAddContRange(HttpHeader * hdr, HttpHdrRangeSpec spec, size_t ent_len);
    +extern void httpHeaderAddContRange(HttpHeader *, HttpHdrRangeSpec, ssize_t);
     extern void strListAdd(String * str, const char *item, char del);
     extern int strListIsMember(const String * str, const char *item, char del);
     extern int strListIsSubstr(const String * list, const char *s, char del);
     extern int strListGetItem(const String * str, char del, const char **item, int *ilen, const char **pos);
     extern const char *getStringPrefix(const char *str, const char *end);
     extern int httpHeaderParseInt(const char *start, int *val);
    -extern int httpHeaderParseSize(const char *start, size_t * sz);
    +extern int httpHeaderParseSize(const char *start, ssize_t * sz);
     extern int httpHeaderReset(HttpHeader * hdr);
     #if STDC_HEADERS
     extern void httpHeaderPutStrf(HttpHeader * hdr, http_hdr_type id, const char *fmt,...);
     #else
    -extern void
    -     httpHeaderPutStrf();
    +extern void httpHeaderPutStrf();
     #endif
     
     
     /* Http Header */
    -extern void httpHeaderInitModule();
    -extern void httpHeaderCleanModule();
    +extern void httpHeaderInitModule(void);
    +extern void httpHeaderCleanModule(void);
     /* init/clean */
     extern void httpHeaderInit(HttpHeader * hdr, http_hdr_owner_type owner);
     extern void httpHeaderClean(HttpHeader * hdr);
    @@ -444,16 +444,16 @@ extern int httpMsgIsPersistent(float http_ver, const HttpHeader * hdr);
     extern int httpMsgIsolateHeaders(const char **parse_start, const char **blk_start, const char **blk_end);
     
     /* Http Reply */
    -extern void httpReplyInitModule();
    +extern void httpReplyInitModule(void);
     /* create/destroy */
    -extern HttpReply *httpReplyCreate();
    +extern HttpReply *httpReplyCreate(void);
     extern void httpReplyDestroy(HttpReply * rep);
     /* reset: clean, then init */
     extern void httpReplyReset(HttpReply * rep);
     /* absorb: copy the contents of a new reply to the old one, destroy new one */
     extern void httpReplyAbsorb(HttpReply * rep, HttpReply * new_rep);
     /* parse returns -1,0,+1 on error,need-more-data,success */
    -extern int httpReplyParse(HttpReply * rep, const char *buf);	/*, int atEnd); */
    +extern int httpReplyParse(HttpReply * rep, const char *buf, ssize_t);
     extern void httpReplyPackInto(const HttpReply * rep, Packer * p);
     /* ez-routines */
     /* mem-pack: returns a ready to use mem buffer with a packed reply */
    @@ -1112,6 +1112,7 @@ extern double gb_to_double(const gb_t *);
     extern const char *gb_to_str(const gb_t *);
     extern void gb_flush(gb_t *);	/* internal, do not use this */
     extern int stringHasWhitespace(const char *);
    +extern int stringHasCntl(const char *);
     extern void linklistPush(link_list **, void *);
     extern void *linklistShift(link_list **);
     extern int xrename(const char *from, const char *to);
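    (For reference: the httpReplyParse() prototype above now takes the header length as a third
    argument. The call sites changed elsewhere in this patch (client_side.cc, net_db.cc,
    peer_digest.cc) pair it with headersEnd(), roughly as in this sketch; reply, buf and size are
    hypothetical locals.)

        size_t hdr_size = headersEnd(buf, size);    /* 0 while the headers are still incomplete */
        if (hdr_size) {
            /* httpReplyParse returns -1/0/+1 for error / need-more-data / success */
            if (httpReplyParse(reply, buf, hdr_size) > 0) {
                /* reply->sline.status etc. are now usable */
            }
        }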
    diff --git a/src/redirect.cc b/src/redirect.cc
    index 0ee0bf37b6..661f5ef8c8 100644
    --- a/src/redirect.cc
    +++ b/src/redirect.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: redirect.cc,v 1.81 1999/06/24 22:08:43 wessels Exp $
    + * $Id: redirect.cc,v 1.82 1999/10/04 05:05:24 wessels Exp $
      *
      * DEBUG: section 29    Redirector
      * AUTHOR: Duane Wessels
    @@ -103,6 +103,18 @@ redirectStart(clientHttpRequest * http, RH * handler, void *data)
     	handler(data, NULL);
     	return;
         }
    +    if (Config.accessList.redirector) {
    +	aclCheck_t ch;
    +	memset(&ch, '\0', sizeof(ch));
    +	ch.src_addr = http->conn->peer.sin_addr;
    +	ch.my_addr = http->conn->me.sin_addr;
    +	ch.request = http->request;
    +	if (!aclCheckFast(Config.accessList.redirector, &ch)) {
    +	    /* denied -- bypass redirector */
    +	    handler(data, NULL);
    +	    return;
    +	}
    +    }
         if (Config.onoff.redirector_bypass && redirectors->stats.queue_size) {
     	/* Skip redirector if there is one request queued */
     	n_bypassed++;
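    (Illustration of the fast ACL check the redirect.cc hunk above adds before a request is queued to
    the redirector helpers; access_list stands for any acl_access list such as
    Config.accessList.redirector, and handler/data are the caller's completion callback as in
    redirectStart().)

        aclCheck_t ch;
        memset(&ch, '\0', sizeof(ch));
        ch.src_addr = http->conn->peer.sin_addr;
        ch.my_addr = http->conn->me.sin_addr;
        ch.request = http->request;
        if (!aclCheckFast(access_list, &ch)) {
            /* denied: skip the redirector and hand the URL back unchanged */
            handler(data, NULL);
            return;
        }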
    diff --git a/src/refresh.cc b/src/refresh.cc
    index 8ae1e57dc7..2bc5e76389 100644
    --- a/src/refresh.cc
    +++ b/src/refresh.cc
    @@ -1,7 +1,7 @@
     
     
     /*
    - * $Id: refresh.cc,v 1.49 1999/06/10 06:10:34 wessels Exp $
    + * $Id: refresh.cc,v 1.50 1999/10/04 05:05:24 wessels Exp $
      *
      * DEBUG: section 22    Refresh Calculation
      * AUTHOR: Harvest Derived
    @@ -386,7 +386,7 @@ refreshStats(StoreEntry * sentry)
     }
     
     void
    -refreshInit()
    +refreshInit(void)
     {
         memset(refreshCounts, 0, sizeof(refreshCounts));
         refreshCounts[rcHTTP].proto = "HTTP";
    diff --git a/src/snmp_agent.cc b/src/snmp_agent.cc
    index d999b028fb..9aac952b8c 100644
    --- a/src/snmp_agent.cc
    +++ b/src/snmp_agent.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: snmp_agent.cc,v 1.71 1999/06/17 22:20:42 wessels Exp $
    + * $Id: snmp_agent.cc,v 1.72 1999/10/04 05:05:25 wessels Exp $
      *
      * DEBUG: section 49     SNMP Interface
      * AUTHOR: Kostas Anagnostakis
    @@ -280,7 +280,11 @@ snmp_prfSysFn(variable_list * Var, snint * ErrP)
     	break;
         case PERF_SYS_CURLRUEXP:
     	Answer = snmp_var_new_integer(Var->name, Var->name_length,
    +#if !HEAP_REPLACEMENT
     	    (snint) (storeExpiredReferenceAge() * 100),
    +#else
    +	    0,
    +#endif
     	    SMI_TIMETICKS);
     	break;
         case PERF_SYS_CURUNLREQ:
    diff --git a/src/snmp_core.cc b/src/snmp_core.cc
    index a8128370be..544f2fa436 100644
    --- a/src/snmp_core.cc
    +++ b/src/snmp_core.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: snmp_core.cc,v 1.39 1999/06/17 22:20:43 wessels Exp $
    + * $Id: snmp_core.cc,v 1.40 1999/10/04 05:05:26 wessels Exp $
      *
      * DEBUG: section 49    SNMP support
      * AUTHOR: Glenn Chisholm
    @@ -483,7 +483,7 @@ snmpHandleUdp(int sock, void *not_used)
     /*
      * Turn SNMP packet into a PDU, check available ACL's
      */
    -void
    +static void
     snmpDecodePacket(snmp_request_t * rq)
     {
         struct snmp_pdu *PDU;
    @@ -519,7 +519,7 @@ snmpDecodePacket(snmp_request_t * rq)
     /*
      * Packet OK, ACL Check OK, Create response.
      */
    -void
    +static void
     snmpConstructReponse(snmp_request_t * rq)
     {
         struct snmp_session Session;
    @@ -546,7 +546,7 @@ snmpConstructReponse(snmp_request_t * rq)
      * 
      * If configured, forward any responses which are not for this agent.
      */
    -struct snmp_pdu *
    +static struct snmp_pdu *
     snmpAgentResponse(struct snmp_pdu *PDU)
     {
         struct snmp_pdu *Answer = NULL;
    @@ -632,7 +632,7 @@ snmpAgentResponse(struct snmp_pdu *PDU)
         return (Answer);
     }
     
    -oid_ParseFn *
    +static oid_ParseFn *
     snmpTreeGet(oid * Current, snint CurrentLen)
     {
         oid_ParseFn *Fn = NULL;
    @@ -659,7 +659,7 @@ snmpTreeGet(oid * Current, snint CurrentLen)
         return (Fn);
     }
     
    -oid_ParseFn *
    +static oid_ParseFn *
     snmpTreeNext(oid * Current, snint CurrentLen, oid ** Next, snint * NextLen)
     {
         oid_ParseFn *Fn = NULL;
    @@ -725,7 +725,7 @@ snmpTreeNext(oid * Current, snint CurrentLen, oid ** Next, snint * NextLen)
         return (Fn);
     }
     
    -oid *
    +static oid *
     static_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
     {
         oid *instance = NULL;
    @@ -740,7 +740,7 @@ static_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn
         return (instance);
     }
     
    -oid *
    +static oid *
     time_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
     {
         oid *instance = NULL;
    @@ -767,7 +767,7 @@ time_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
         return (instance);
     }
     
    -oid *
    +static oid *
     peer_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
     {
         oid *instance = NULL;
    @@ -826,7 +826,7 @@ peer_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
         return (instance);
     }
     
    -oid *
    +static oid *
     client_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn)
     {
         oid *instance = NULL;
    @@ -874,7 +874,7 @@ client_Inst(oid * name, snint * len, mib_tree_entry * current, oid_ParseFn ** Fn
     /* 
      * Returns the sibling object in the tree
      */
    -mib_tree_entry *
    +static mib_tree_entry *
     snmpTreeSiblingEntry(oid entry, snint len, mib_tree_entry * current)
     {
         mib_tree_entry *next = NULL;
    @@ -897,7 +897,7 @@ snmpTreeSiblingEntry(oid entry, snint len, mib_tree_entry * current)
     /* 
      * Returns the requested child object or NULL if it does not exist
      */
    -mib_tree_entry *
    +static mib_tree_entry *
     snmpTreeEntry(oid entry, snint len, mib_tree_entry * current)
     {
         mib_tree_entry *next = NULL;
    @@ -915,7 +915,7 @@ snmpTreeEntry(oid entry, snint len, mib_tree_entry * current)
     /*
      * Adds a node to the MIB tree structure and adds the appropriate children
      */
    -mib_tree_entry *
    +static mib_tree_entry *
     #if STDC_HEADERS
     snmpAddNode(oid * name, int len, oid_ParseFn * parsefunction, instance_Fn * instancefunction, int children,...)
     #else
    @@ -968,7 +968,7 @@ snmpAddNode(va_alist)
     /* 
      * Returns the list of parameters in an oid
      */
    -oid *
    +static oid *
     #if STDC_HEADERS
     snmpCreateOid(int length,...)
     #else
    @@ -1002,7 +1002,7 @@ snmpCreateOid(va_alist)
     /*
      * Allocate space for, and copy, an OID.  Returns new oid.
      */
    -oid *
    +static oid *
     snmpOidDup(oid * A, snint ALen)
     {
         oid *Ans = xmalloc(sizeof(oid) * ALen);
    diff --git a/src/squid.h b/src/squid.h
    index 1c42ae6cd1..c223d6a679 100644
    --- a/src/squid.h
    +++ b/src/squid.h
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: squid.h,v 1.194 1999/09/28 23:48:49 wessels Exp $
    + * $Id: squid.h,v 1.195 1999/10/04 05:05:27 wessels Exp $
      *
      * AUTHOR: Duane Wessels
      *
    @@ -51,10 +51,12 @@
     #define CHANGE_FD_SETSIZE 0
     #endif
     
    -/* Cannot increase FD_SETSIZE on FreeBSD before 2.2.0, causes select(2)
    - * to return EINVAL. */
    -/* Marian Durkovic  */
    -/* Peter Wemm  */
    +/*
     + * Cannot increase FD_SETSIZE on FreeBSD before 2.2.0; doing so causes
     + * select(2) to return EINVAL.
    + * --Marian Durkovic 
    + * --Peter Wemm 
    + */
     #if defined(_SQUID_FREEBSD_)
     #include 
     #if __FreeBSD_version < 220000
    @@ -63,6 +65,15 @@
     #endif
     #endif
     
    +/*
    + * Trying to redefine CHANGE_FD_SETSIZE causes a slew of warnings
    + * on Mac OS X Server.
    + */
    +#if defined(_SQUID_APPLE_)
    +#undef CHANGE_FD_SETSIZE
    +#define CHANGE_FD_SETSIZE 0
    +#endif
    +
     /* Increase FD_SETSIZE if SQUID_MAXFD is bigger */
     #if CHANGE_FD_SETSIZE && SQUID_MAXFD > DEFAULT_FD_SETSIZE
     #define FD_SETSIZE SQUID_MAXFD
    @@ -359,7 +370,14 @@ struct rusage {
     #include "globals.h"
     
     #include "util.h"
    +
    +/*
    + * Mac OS X Server already has radix.h as a standard header, so
    + * this causes conflicts.
     + */
    +#ifndef _SQUID_APPLE_
     #include "radix.h"
    +#endif
     
     #if !HAVE_TEMPNAM
     #include "tempnam.h"
    diff --git a/src/ssl.cc b/src/ssl.cc
    index 12efc2cde6..a90e04424d 100644
    --- a/src/ssl.cc
    +++ b/src/ssl.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: ssl.cc,v 1.99 1999/08/02 06:18:41 wessels Exp $
    + * $Id: ssl.cc,v 1.100 1999/10/04 05:05:28 wessels Exp $
      *
      * DEBUG: section 26    Secure Sockets Layer Proxy
      * AUTHOR: Duane Wessels
    @@ -206,8 +206,8 @@ sslReadServer(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslReadServer: FD %d: read failure: %s\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslReadServer: FD %d: read failure: %s\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         } else if (len == 0) {
    @@ -281,8 +281,8 @@ sslWriteServer(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslWriteServer: FD %d: write failure: %s.\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslWriteServer: FD %d: write failure: %s.\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         }
    @@ -322,8 +322,8 @@ sslWriteClient(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslWriteClient: FD %d: write failure: %s.\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslWriteClient: FD %d: write failure: %s.\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         }
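
The ssl.cc hunks above (and the identical tunnel.cc hunks further down) stop logging
transient read/write errors at level 1: when ignoreErrno() reports the errno as harmless,
the message is demoted to debug level 3 and the descriptor is left open. As a rough,
hedged sketch of the kind of test ignoreErrno() performs (the real function lives
elsewhere in Squid and its errno list may differ):

    #include <errno.h>

    /* Hedged stand-in: non-zero for errno values that usually mean
     * "try again later" rather than a hard failure. */
    static int
    ignoreErrno_sketch(int ierrno)
    {
        switch (ierrno) {
        case EINPROGRESS:
        case EWOULDBLOCK:
    #if EAGAIN != EWOULDBLOCK
        case EAGAIN:
    #endif
        case EINTR:
            return 1;
        default:
            return 0;
        }
    }

With that in place, debug(50, ignoreErrno(errno) ? 3 : 1) keeps real failures at level 1
while EINTR/EWOULDBLOCK noise only shows up at higher debug levels.
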
    diff --git a/src/stat.cc b/src/stat.cc
    index 7e89ae9d68..2916bff99d 100644
    --- a/src/stat.cc
    +++ b/src/stat.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: stat.cc,v 1.320 1999/07/13 14:51:19 wessels Exp $
    + * $Id: stat.cc,v 1.321 1999/10/04 05:05:29 wessels Exp $
      *
      * DEBUG: section 18    Cache Manager Statistics
      * AUTHOR: Harvest Derived
    @@ -79,7 +79,7 @@ static OBJH statCountersHistograms;
     static OBJH statClientRequests;
     
     #ifdef XMALLOC_STATISTICS
    -static void info_get_mallstat(int, int, StoreEntry *);
    +static void info_get_mallstat(int, int, void *);
     #endif
     
     StatCounters CountHist[N_COUNT_HIST];
    @@ -383,8 +383,9 @@ statOpenfdObj(StoreEntry * sentry)
     
     #ifdef XMALLOC_STATISTICS
     static void
    -info_get_mallstat(int size, int number, StoreEntry * sentry)
    +info_get_mallstat(int size, int number, void *data)
     {
    +    StoreEntry * sentry = data;
         if (number > 0)
     	storeAppendPrintf(sentry, "\t%d = %d\n", size, number);
     }
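
The info_get_mallstat() change swaps its StoreEntry * parameter for void *, presumably so
its type matches the generic callback the XMALLOC_STATISTICS walker expects; calling a
function through a function-pointer type with a different parameter list is undefined
behavior in C. A minimal sketch of the pattern (the walker and its names are made up for
illustration, not Squid's):

    #include <stdio.h>

    /* Hypothetical walker callback type: (size, count, opaque cookie). */
    typedef void STATWALKER(int size, int number, void *cookie);

    static void
    walk_stats_sketch(STATWALKER * func, void *cookie)
    {
        func(16, 42, cookie);           /* pretend: 42 allocations of 16 bytes */
        func(32, 7, cookie);
    }

    struct byte_sink {
        long total;
    };

    static void
    count_bytes(int size, int number, void *cookie)
    {
        struct byte_sink *s = cookie;   /* recover the real type inside the callback */
        s->total += (long) size * number;
    }

    int
    main(void)
    {
        struct byte_sink s = { 0 };
        walk_stats_sketch(count_bytes, &s);
        printf("%ld bytes accounted\n", s.total);
        return 0;
    }
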
    diff --git a/src/store.cc b/src/store.cc
    index a84501ffd0..238c5d614b 100644
    --- a/src/store.cc
    +++ b/src/store.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: store.cc,v 1.508 1999/09/29 00:22:18 wessels Exp $
    + * $Id: store.cc,v 1.509 1999/10/04 05:05:31 wessels Exp $
      *
      * DEBUG: section 20    Storage Manager
      * AUTHOR: Harvest Derived
    @@ -711,15 +711,15 @@ storeGetMemSpace(int size)
         int released = 0;
         static time_t last_check = 0;
         int pages_needed;
    -    dlink_node *m;
    -    dlink_node *prev = NULL;
         int locked = 0;
     #if !HEAP_REPLACEMENT
         dlink_node *head;
    +    dlink_node *m;
    +    dlink_node *prev = NULL;
     #else
         heap *heap = inmem_heap;
         heap_key age, min_age = 0.0;
    -    linklist *locked_entries = NULL;
    +    link_list *locked_entries = NULL;
     #endif
         if (squid_curtime == last_check)
     	return;
    @@ -739,7 +739,7 @@ storeGetMemSpace(int size)
     	    locked++;
     	    debug(20, 5) ("storeGetMemSpace: locked key %s\n",
     		storeKeyText(e->key));
    -	    linklistPush(e, &locked_entries);
    +	    linklistPush(&locked_entries, e);
     	    continue;
     	}
     	released++;
    @@ -758,7 +758,7 @@ storeGetMemSpace(int size)
         /*
          * Reinsert all bumped locked entries back into heap...
          */
    -    while ((e = linklistPop(&locked_entries)))
    +    while ((e = linklistShift(&locked_entries)))
     	e->mem_obj->node = heap_insert(inmem_heap, e);
     #else
         head = inmem_list.head;
    @@ -800,8 +800,6 @@ storeGetMemSpace(int size)
     void
     storeMaintainSwapSpace(void *datanotused)
     {
    -    dlink_node *m;
    -    dlink_node *prev = NULL;
         StoreEntry *e = NULL;
         int scanned = 0;
         int locked = 0;
    @@ -810,10 +808,13 @@ storeMaintainSwapSpace(void *datanotused)
         int max_remove;
         double f;
         static time_t last_warn_time = 0;
    -#if HEAP_REPLACEMENT
    +#if !HEAP_REPLACEMENT
    +    dlink_node *m;
    +    dlink_node *prev = NULL;
    +#else
         heap *heap = store_heap;
         heap_key age, min_age = 0.0;
    -    linklist *locked_entries = NULL;
    +    link_list *locked_entries = NULL;
     #if HEAP_REPLACEMENT_DEBUG
         if (!verify_heap_property(store_heap)) {
     	debug(20, 1) ("Heap property violated!\n");
    @@ -835,6 +836,12 @@ storeMaintainSwapSpace(void *datanotused)
     	f, max_scan, max_remove);
     #if HEAP_REPLACEMENT
         while (heap_nodes(heap) > 0) {
    +	if (store_swap_size < store_swap_low)
    +	    break;
    +	if (expired >= max_remove)
    +	    break;
    +	if (scanned >= max_scan)
    +	    break;
     	age = heap_peepminkey(heap);
     	e = heap_extractmin(heap);
     	e->node = NULL;		/* no longer in the heap */
    @@ -850,7 +857,7 @@ storeMaintainSwapSpace(void *datanotused)
     		 */
     		debug(20, 4) ("storeMaintainSwapSpace: locked url %s\n",
     		    (e->mem_obj && e->mem_obj->url) ? e->mem_obj->url : storeKeyText(e->key));
    -		linklistPush(e, &locked_entries);
    +		linklistPush(&locked_entries, e);
     	    }
     	    locked++;
     	    continue;
    @@ -872,13 +879,9 @@ storeMaintainSwapSpace(void *datanotused)
     	     */
     	    debug(20, 5) ("storeMaintainSwapSpace: non-expired %s\n",
     		storeKeyText(e->key));
    -	    linklistAdd(e, &locked_entries);
    +	    linklistPush(&locked_entries, e);
     	    continue;
     	}
    -	if ((store_swap_size < store_swap_low)
    -	    || (expired >= max_remove)
    -	    || (scanned >= max_scan))
    -	    break;
         }
         /*
          * Bump the heap age factor.
    @@ -888,7 +891,7 @@ storeMaintainSwapSpace(void *datanotused)
         /*
          * Reinsert all bumped locked entries back into heap...
          */
    -    while ((e = linklistPop(&locked_entries)))
    +    while ((e = linklistShift(&locked_entries)))
     	e->node = heap_insert(store_heap, e);
     #else
         for (m = store_list.tail; m; m = prev) {
    @@ -1229,7 +1232,7 @@ storeFreeMemory(void)
         hashFreeItems(store_table, destroy_StoreEntry);
         hashFreeMemory(store_table);
         store_table = NULL;
    -#if USE_CACHE_DIGEST
    +#if USE_CACHE_DIGESTS
         if (store_digest)
     	cacheDigestDestroy(store_digest);
     #endif
    @@ -1454,6 +1457,7 @@ storeEntryReset(StoreEntry * e)
         mem->inmem_hi = mem->inmem_lo = 0;
         httpReplyDestroy(mem->reply);
         mem->reply = httpReplyCreate();
    +    e->expires = e->lastmod = e->timestamp = -1;
     }
     
     #if HEAP_REPLACEMENT
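
With HEAP_REPLACEMENT, entries that cannot be released are pulled off the heap, parked on
a link_list, and reinserted once the scan finishes; the hunks above also fix the argument
order of linklistPush() (list first, item second) and call linklistShift() instead of a
non-existent linklistPop(). A self-contained sketch of that parking pattern, using
stand-in list helpers that only mirror the interface declared in protos.h (Squid's real
ones are in tools.cc):

    #include <stdlib.h>

    typedef struct _link_list {
        void *ptr;
        struct _link_list *next;
    } link_list;

    static void
    linklistPush(link_list ** L, void *p)
    {
        link_list *n = malloc(sizeof(*n));
        n->ptr = p;
        n->next = NULL;
        while (*L)
            L = &(*L)->next;
        *L = n;
    }

    static void *
    linklistShift(link_list ** L)
    {
        link_list *n = *L;
        void *p;
        if (n == NULL)
            return NULL;
        *L = n->next;
        p = n->ptr;
        free(n);
        return p;
    }

    int
    main(void)
    {
        link_list *locked_entries = NULL;
        int a, b;
        void *e;
        linklistPush(&locked_entries, &a);      /* list first, item second */
        linklistPush(&locked_entries, &b);
        while ((e = linklistShift(&locked_entries)) != NULL) {
            /* here store.cc would do: e->node = heap_insert(store_heap, e) */
        }
        return 0;
    }
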
    diff --git a/src/store_client.cc b/src/store_client.cc
    index 6d58137d8c..64b4dd2cb2 100644
    --- a/src/store_client.cc
    +++ b/src/store_client.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: store_client.cc,v 1.76 1999/09/29 00:10:33 wessels Exp $
    + * $Id: store_client.cc,v 1.77 1999/10/04 05:05:32 wessels Exp $
      *
      * DEBUG: section 20    Storage Manager Client-Side Interface
      * AUTHOR: Duane Wessels
    @@ -333,7 +333,7 @@ storeClientReadBody(void *data, const char *buf, ssize_t len)
         assert(sc->callback != NULL);
         debug(20, 3) ("storeClientReadBody: len %d\n", len);
         if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
    -	httpReplyParse(mem->reply, sc->copy_buf);
    +	httpReplyParse(mem->reply, sc->copy_buf, headersEnd(sc->copy_buf, len));
         sc->callback = NULL;
         callback(sc->callback_data, sc->copy_buf, len);
     }
    @@ -360,6 +360,13 @@ storeClientReadHeader(void *data, const char *buf, ssize_t len)
     	return;
         }
         tlv_list = storeSwapMetaUnpack(buf, &swap_hdr_sz);
    +    if (swap_hdr_sz > len) {
    +	/* oops, bad disk file? */
    +	debug(20, 1) ("storeClientReadHeader: header too small\n");
    +	sc->callback = NULL;
    +	callback(sc->callback_data, sc->copy_buf, -1);
    +	return;
    +    }
         if (tlv_list == NULL) {
     	debug(20, 1) ("storeClientReadHeader: failed to unpack meta data\n");
     	sc->callback = NULL;
    @@ -387,7 +394,8 @@ storeClientReadHeader(void *data, const char *buf, ssize_t len)
     	    copy_sz);
     	xmemmove(sc->copy_buf, sc->copy_buf + swap_hdr_sz, copy_sz);
     	if (sc->copy_offset == 0 && len > 0 && mem->reply->sline.status == 0)
    -	    httpReplyParse(mem->reply, sc->copy_buf);
    +	    httpReplyParse(mem->reply, sc->copy_buf,
    +		headersEnd(sc->copy_buf, copy_sz));
     	sc->callback = NULL;
     	callback(sc->callback_data, sc->copy_buf, copy_sz);
     	return;
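
httpReplyParse() now takes an explicit length, and the store client passes headersEnd()
of the copy buffer so that only the header prefix is parsed instead of trusting the
buffer to be a NUL-terminated header block. A rough stand-in for what such a header-end
scan does (Squid's real headersEnd() handles more corner cases, so treat this as an
illustration only):

    #include <stddef.h>

    /* Returns the offset just past the blank line that ends the HTTP
     * headers (LF LF or CR LF CR LF), or 0 if no complete header block
     * is present within the first 'len' bytes. */
    static size_t
    headers_end_sketch(const char *buf, size_t len)
    {
        size_t i;
        for (i = 0; i + 1 < len; i++) {
            if (buf[i] != '\n')
                continue;
            if (buf[i + 1] == '\n')
                return i + 2;
            if (i + 2 < len && buf[i + 1] == '\r' && buf[i + 2] == '\n')
                return i + 3;
        }
        return 0;
    }

A zero result maps naturally onto the "need more data" case of httpReplyParse().
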
    diff --git a/src/store_log.cc b/src/store_log.cc
    index 9a2f1c0ea5..4c68edd6b3 100644
    --- a/src/store_log.cc
    +++ b/src/store_log.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: store_log.cc,v 1.8 1999/08/02 06:18:46 wessels Exp $
    + * $Id: store_log.cc,v 1.9 1999/10/04 05:05:34 wessels Exp $
      *
      * DEBUG: section 20    Storage Manager Logging Functions
      * AUTHOR: Duane Wessels
    @@ -55,6 +55,8 @@ storeLog(int tag, const StoreEntry * e)
     	return;
         if (mem == NULL)
     	return;
    +    if (EBIT_TEST(e->flags, ENTRY_DONT_LOG))
    +	return;
         if (mem->log_url == NULL) {
     	debug(20, 1) ("storeLog: NULL log_url for %s\n", mem->url);
     	storeMemObjectDump(mem);
    @@ -71,7 +73,7 @@ storeLog(int tag, const StoreEntry * e)
     	(int) reply->date,
     	(int) reply->last_modified,
     	(int) reply->expires,
    -	strBuf(reply->content_type) ? strBuf(reply->content_type) : "unknown",
    +	strLen(reply->content_type) ? strBuf(reply->content_type) : "unknown",
     	reply->content_length,
     	(int) (mem->inmem_hi - mem->reply->hdr_sz),
     	RequestMethodStr[mem->method],
    diff --git a/src/store_swapin.cc b/src/store_swapin.cc
    index b0e3b4f3ed..06aae94e21 100644
    --- a/src/store_swapin.cc
    +++ b/src/store_swapin.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: store_swapin.cc,v 1.20 1999/08/02 06:18:46 wessels Exp $
    + * $Id: store_swapin.cc,v 1.21 1999/10/04 05:05:34 wessels Exp $
      *
      * DEBUG: section 20    Storage Manager Swapin Functions
      * AUTHOR: Duane Wessels
    @@ -71,8 +71,14 @@ static void
     storeSwapInFileClosed(void *data, int errflag, storeIOState * sio)
     {
         store_client *sc = data;
    +    STCB *callback;
         debug(20, 3) ("storeSwapInFileClosed: sio=%p, errflag=%d\n",
     	sio, errflag);
         cbdataUnlock(sio);
         sc->swapin_sio = NULL;
    +    if ((callback = sc->callback)) {
    +	assert(errflag <= 0);
    +	sc->callback = NULL;
    +	callback(sc->callback_data, sc->copy_buf, errflag);
    +    }
     }
    diff --git a/src/structs.h b/src/structs.h
    index 929bf7492d..e4566f6143 100644
    --- a/src/structs.h
    +++ b/src/structs.h
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: structs.h,v 1.306 1999/09/29 00:22:20 wessels Exp $
    + * $Id: structs.h,v 1.307 1999/10/04 05:05:35 wessels Exp $
      *
      *
      * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
    @@ -75,6 +75,8 @@ struct _acl_proxy_auth_user {
         char *passwd;
         int passwd_ok;		/* 1 = passwd checked OK */
         long expiretime;
    +    struct in_addr ipaddr;	/* IP addr this user authenticated from */
    +    time_t ip_expiretime;
     };
     
     struct _acl_deny_info_list {
    @@ -277,7 +279,6 @@ struct _SquidConfig {
         struct {
     	char *configFile;
     	char *agentInfo;
    -	u_short localPort;
         } Snmp;
     #endif
     #if USE_WCCP
    @@ -310,6 +311,7 @@ struct _SquidConfig {
         int redirectChildren;
         int authenticateChildren;
         int authenticateTTL;
    +    int authenticateIpTTL;
         struct {
     	char *host;
     	u_short port;
    @@ -396,6 +398,7 @@ struct _SquidConfig {
     	int prefer_direct;
     	int strip_query_terms;
     	int redirector_bypass;
    +	int ignore_unknown_nameservers;
         } onoff;
         acl *aclList;
         struct {
    @@ -413,6 +416,7 @@ struct _SquidConfig {
     #if USE_IDENT
     	acl_access *identLookup;
     #endif
    +	acl_access *redirector;
         } accessList;
         acl_deny_info_list *denyInfoList;
         char *proxyAuthRealm;
    @@ -625,8 +629,8 @@ struct _HttpHdrCc {
     
     /* http byte-range-spec */
     struct _HttpHdrRangeSpec {
    -    size_t offset;
    -    size_t length;
    +    ssize_t offset;
    +    ssize_t length;
     };
     
     /* There may be more than one byte range specified in the request.
    @@ -640,7 +644,7 @@ struct _HttpHdrRange {
     /* http content-range header field */
     struct _HttpHdrContRange {
         HttpHdrRangeSpec spec;
    -    size_t elength;		/* entity length, not content length */
    +    ssize_t elength;		/* entity length, not content length */
     };
     
     /* some fields can hold either time or etag specs (e.g. If-Range) */
    @@ -654,8 +658,8 @@ struct _TimeOrTag {
     struct _HttpHdrRangeIter {
         HttpHdrRangePos pos;
         const HttpHdrRangeSpec *spec;	/* current spec at pos */
    -    size_t debt_size;		/* bytes left to send from the current spec */
    -    size_t prefix_size;		/* the size of the incoming HTTP msg prefix */
    +    ssize_t debt_size;		/* bytes left to send from the current spec */
    +    ssize_t prefix_size;	/* the size of the incoming HTTP msg prefix */
         String boundary;		/* boundary for multipart responses */
     };
     
    @@ -1437,6 +1441,7 @@ struct _request_t {
         HierarchyLogEntry hier;
         err_type err_type;
         char *peer_login;		/* Configured peer login:password */
    +    time_t lastmod;		/* Used on refreshes */
     };
     
     struct _cachemgr_passwd {
    @@ -1491,11 +1496,11 @@ struct _ErrorState {
     	unsigned int flag_cbdata:1;
         } flags;
         struct {
    +	wordlist *server_msg;
     	char *request;
     	char *reply;
         } ftp;
         char *request_hdrs;
    -    wordlist *ftp_server_msg;
     };
     
     /*
    @@ -1660,8 +1665,8 @@ struct _storeSwapLogData {
     
     /* object to track per-action memory usage (e.g. #idle objects) */
     struct _MemMeter {
    -    size_t level;		/* current level (count or volume) */
    -    size_t hwater_level;	/* high water mark */
    +    ssize_t level;		/* current level (count or volume) */
    +    ssize_t hwater_level;	/* high water mark */
         time_t hwater_stamp;	/* timestamp of last high water mark change */
     };
     
    @@ -1699,6 +1704,7 @@ struct _ClientInfo {
     	int n_req;
     	int n_denied;
         } cutoff;
    +    int n_established;		/* number of current established connections */
     };
     
     struct _CacheDigest {
    @@ -1728,6 +1734,7 @@ struct _FwdState {
         int n_tries;
         struct {
     	unsigned int dont_retry:1;
    +	unsigned int ftp_pasv_failed:1;
         } flags;
     };
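
Several byte-count fields in structs.h (range offsets and lengths, MemMeter levels, and
mb_size_t in typedefs.h below) move from size_t to ssize_t. The likely motive, inferred
from the fields affected rather than stated in the patch, is that these values are
compared and subtracted freely and sometimes need a negative sentinel such as -1 for
"unknown"; unsigned arithmetic would silently wrap instead. A two-line illustration:

    #include <stdio.h>
    #include <sys/types.h>

    int
    main(void)
    {
        size_t usz = 0;
        ssize_t ssz = 0;
        usz -= 1;               /* wraps to a huge positive value */
        ssz -= 1;               /* stays -1, usable as an "unknown" sentinel */
        printf("size_t  0 - 1 = %lu\n", (unsigned long) usz);
        printf("ssize_t 0 - 1 = %ld\n", (long) ssz);
        return 0;
    }
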
     
    diff --git a/src/tools.cc b/src/tools.cc
    index 231dbd4e0a..4fcc9a589d 100644
    --- a/src/tools.cc
    +++ b/src/tools.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: tools.cc,v 1.186 1999/08/02 06:18:49 wessels Exp $
    + * $Id: tools.cc,v 1.187 1999/10/04 05:05:36 wessels Exp $
      *
      * DEBUG: section 21    Misc Functions
      * AUTHOR: Harvest Derived
    @@ -305,9 +305,9 @@ fatal_common(const char *message)
     #if HAVE_SYSLOG
         syslog(LOG_ALERT, "%s", message);
     #endif
    -    fprintf(debug_log, "FATAL: pid %d %s\n", (int) getpid(), message);
    -    if (opt_debug_stderr && debug_log != stderr)
    -	fprintf(stderr, "FATAL: pid %d %s\n", (int) getpid(), message);
    +    fprintf(debug_log, "FATAL: %s\n", message);
    +    if (opt_debug_stderr > 0 && debug_log != stderr)
    +	fprintf(stderr, "FATAL: %s\n", message);
         fprintf(debug_log, "Squid Cache (Version %s): Terminated abnormally.\n",
     	version_string);
         fflush(debug_log);
    @@ -867,7 +867,6 @@ linklistShift(link_list ** L)
         return p;
     }
     
    -
     /*
      * Same as rename(2) but complains if something goes wrong;
      * the caller is responsible for handling and explaining the 
    @@ -883,3 +882,16 @@ xrename(const char *from, const char *to)
     	from, to, xstrerror());
         return -1;
     }
    +
    +int
    +stringHasCntl(const char *s)
    +{
    +    unsigned char c;
    +    while ((c = (unsigned char) *s++) != '\0') {
    +	if (c <= 0x1f)
    +	    return 1;
    +	if (c >= 0x7f && c <= 0x9f)
    +	    return 1;
    +    }
    +    return 0;
    +}
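
stringHasCntl() (declared in protos.h above) flags C0 control characters and the
0x7f-0x9f range; urlCanonicalClean() below now uses it, instead of the whitespace test,
to decide when a URL must be re-escaped before logging. A quick standalone check, linked
against the definition above; the sample URLs are invented for illustration:

    #include <assert.h>

    extern int stringHasCntl(const char *);     /* from tools.cc above */

    int
    main(void)
    {
        assert(0 == stringHasCntl("http://example.com/index.html"));
        assert(1 == stringHasCntl("http://example.com/a\tb"));   /* TAB, 0x09 */
        assert(1 == stringHasCntl("http://example.com/\177"));   /* DEL, 0x7f */
        return 0;
    }
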
    diff --git a/src/tunnel.cc b/src/tunnel.cc
    index 3e7fefe1ac..4477d9af8d 100644
    --- a/src/tunnel.cc
    +++ b/src/tunnel.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: tunnel.cc,v 1.99 1999/08/02 06:18:41 wessels Exp $
    + * $Id: tunnel.cc,v 1.100 1999/10/04 05:05:28 wessels Exp $
      *
      * DEBUG: section 26    Secure Sockets Layer Proxy
      * AUTHOR: Duane Wessels
    @@ -206,8 +206,8 @@ sslReadServer(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslReadServer: FD %d: read failure: %s\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslReadServer: FD %d: read failure: %s\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         } else if (len == 0) {
    @@ -281,8 +281,8 @@ sslWriteServer(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslWriteServer: FD %d: write failure: %s.\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslWriteServer: FD %d: write failure: %s.\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         }
    @@ -322,8 +322,8 @@ sslWriteClient(int fd, void *data)
         }
         cbdataLock(sslState);
         if (len < 0) {
    -	debug(50, 1) ("sslWriteClient: FD %d: write failure: %s.\n",
    -	    fd, xstrerror());
    +	debug(50, ignoreErrno(errno) ? 3 : 1)
    +	    ("sslWriteClient: FD %d: write failure: %s.\n", fd, xstrerror());
     	if (!ignoreErrno(errno))
     	    comm_close(fd);
         }
    diff --git a/src/typedefs.h b/src/typedefs.h
    index 65a0d5c44f..83758775ee 100644
    --- a/src/typedefs.h
    +++ b/src/typedefs.h
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: typedefs.h,v 1.97 1999/06/30 06:29:04 wessels Exp $
    + * $Id: typedefs.h,v 1.98 1999/10/04 05:05:37 wessels Exp $
      *
      *
      * SQUID Internet Object Cache  http://squid.nlanr.net/Squid/
    @@ -246,7 +246,7 @@ typedef unsigned char cache_key;
     typedef int Ctx;
     
     /* in case we want to change it later */
    -typedef size_t mb_size_t;
    +typedef ssize_t mb_size_t;
     
     /* iteration for HttpHdrRange */
     typedef int HttpHdrRangePos;
    diff --git a/src/unlinkd.cc b/src/unlinkd.cc
    index 52bb82b255..acb435bf19 100644
    --- a/src/unlinkd.cc
    +++ b/src/unlinkd.cc
    @@ -1,5 +1,5 @@
     /*
    - * $Id: unlinkd.cc,v 1.31 1999/07/13 14:51:29 wessels Exp $
    + * $Id: unlinkd.cc,v 1.32 1999/10/04 05:05:38 wessels Exp $
      *
      * DEBUG: section 12    Unlink Daemon
      * AUTHOR: Duane Wessels
    @@ -45,15 +45,21 @@ main(int argc, char *argv[])
     {
         char buf[UNLINK_BUF_LEN];
         char *t;
    +    int x;
         setbuf(stdin, NULL);
    +    setbuf(stdout, NULL);
         while (fgets(buf, UNLINK_BUF_LEN, stdin)) {
     	if ((t = strchr(buf, '\n')))
     	    *t = '\0';
     #if USE_TRUNCATE
    -	truncate(buf, 0);
    +	x = truncate(buf, 0);
     #else
    -	unlink(buf);
    +	x = unlink(buf);
     #endif
    +	if (x < 0)
    +	    printf("ERR\n");
    +	else
    +	    printf("OK\n");
         }
         exit(0);
     }
    @@ -67,29 +73,74 @@ static int unlinkd_wfd = -1;
     static int unlinkd_rfd = -1;
     #endif
     
    +#define UNLINKD_QUEUE_LIMIT 20
    +
     void
     unlinkdUnlink(const char *path)
     {
     #if USE_UNLINKD
    -    char *buf;
    +    char buf[MAXPATHLEN];
         int l;
    +    int x;
    +    static int queuelen = 0;
         if (unlinkd_wfd < 0) {
     	debug_trap("unlinkdUnlink: unlinkd_wfd < 0");
     	safeunlink(path, 0);
     	return;
         }
    -    l = strlen(path) + 1;
    -    buf = xcalloc(1, l + 1);
    -    strcpy(buf, path);
    -    strcat(buf, "\n");
    -    file_write(unlinkd_wfd,
    -	-1,
    -	buf,
    -	l,
    -	NULL,			/* Handler */
    -	NULL,			/* Handler-data */
    -	xfree);
    +    /*
    +     * If the queue length is greater than our limit, then
    +     * we pause for up to 100ms, hoping that unlinkd
    +     * has some feedback for us.  Maybe it just needs a slice
    +     * of the CPU's time.
    +     */
    +    if (queuelen >= UNLINKD_QUEUE_LIMIT) {
    +	struct timeval to;
    +	fd_set R;
    +	int x;
    +	FD_ZERO(&R);
    +	FD_SET(unlinkd_rfd, &R);
    +	to.tv_sec = 0;
    +	to.tv_usec = 100000;
    +	x = select(unlinkd_rfd + 1, &R, NULL, NULL, &to);
    +    }
    +    /*
    +     * If there is at least one outstanding unlink request, then
    +     * try to read a response.  If there's nothing to read we'll
    +     * get an EWOULDBLOCK or whatever.  If we get a response, then
    +     * decrement the queue size by the number of newlines read.
    +     */
    +    if (queuelen > 0) {
    +	int x;
    +	int i;
    +	char rbuf[512];
    +	x = read(unlinkd_rfd, rbuf, 511);
    +	if (x > 0) {
    +	    rbuf[x] = '\0';
    +	    for (i = 0; i < x; i++)
    +		if ('\n' == rbuf[i])
    +		    queuelen--;
    +	    assert(queuelen >= 0);
    +	}
    +    }
    +    l = strlen(path);
    +    assert(l < MAXPATHLEN);
    +    xstrncpy(buf, path, MAXPATHLEN);
    +    buf[l++] = '\n';
    +    x = write(unlinkd_wfd, buf, l);
    +    if (x < 0) {
    +	debug(50, 1) ("unlinkdUnlink: write FD %d failed: %s\n",
    +	    unlinkd_wfd, xstrerror());
    +	safeunlink(path, 0);
    +	return;
    +    } else if (x != l) {
    +	debug(50, 1) ("unlinkdUnlink: FD %d only wrote %d of %d bytes\n",
    +	    unlinkd_wfd, x, l);
    +	safeunlink(path, 0);
    +	return;
    +    }
         Counter.unlink.requests++;
    +    queuelen++;
     #endif
     }
     
    @@ -136,9 +187,15 @@ unlinkdInit(void)
         fd_note(unlinkd_rfd, "unlinkd -> squid");
         commSetTimeout(unlinkd_rfd, -1, NULL, NULL);
         commSetTimeout(unlinkd_wfd, -1, NULL, NULL);
    -    commSetNonBlocking(unlinkd_wfd);
    +    /*
    +     * We leave unlinkd_wfd blocking, because we never want to lose an
    +     * unlink request, and we don't have code to retry if we get
    +     * EWOULDBLOCK.
    +     */
         commSetNonBlocking(unlinkd_rfd);
         debug(12, 1) ("Unlinkd pipe opened on FD %d\n", unlinkd_wfd);
    +#else
    +    debug(12, 1) ("Unlinkd is disabled\n");
     #endif
     }
     
    diff --git a/src/url.cc b/src/url.cc
    index a998a2028c..e454e7920d 100644
    --- a/src/url.cc
    +++ b/src/url.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: url.cc,v 1.118 1999/08/02 06:18:49 wessels Exp $
    + * $Id: url.cc,v 1.119 1999/10/04 05:05:38 wessels Exp $
      *
      * DEBUG: section 23    URL Parsing
      * AUTHOR: Duane Wessels
    @@ -123,6 +123,25 @@ urlInitialize(void)
         debug(23, 5) ("urlInitialize: Initializing...\n");
         assert(sizeof(ProtocolStr) == (PROTO_MAX + 1) * sizeof(char *));
         memset(&null_request_flags, '\0', sizeof(null_request_flags));
    +    /*
    +     * These test that our matchDomainName() function works the
    +     * way we expect it to.
    +     */
    +    assert(0 == matchDomainName("foo.com", "foo.com"));
    +    assert(0 == matchDomainName(".foo.com", "foo.com"));
    +    assert(0 == matchDomainName("foo.com", ".foo.com"));
    +    assert(0 == matchDomainName(".foo.com", ".foo.com"));
    +    assert(0 == matchDomainName("x.foo.com", ".foo.com"));
    +    assert(0 != matchDomainName("x.foo.com", "foo.com"));
    +    assert(0 != matchDomainName("foo.com", "x.foo.com"));
    +    assert(0 != matchDomainName("bar.com", "foo.com"));
    +    assert(0 != matchDomainName(".bar.com", "foo.com"));
    +    assert(0 != matchDomainName(".bar.com", ".foo.com"));
    +    assert(0 != matchDomainName("bar.com", ".foo.com"));
    +    assert(0 < matchDomainName("zzz.com", "foo.com"));
    +    assert(0 > matchDomainName("aaa.com", "foo.com"));
    +    assert(0 == matchDomainName("FOO.com", "foo.COM"));
    +    /* more cases? */
     }
     
     method_t
    @@ -292,7 +311,7 @@ urlParse(method_t method, char *url)
     	case URI_WHITESPACE_ALLOW:
     	    break;
     	case URI_WHITESPACE_ENCODE:
    -	    t = rfc1738_escape(urlpath);
    +	    t = rfc1738_escape_unescaped(urlpath);
     	    xstrncpy(urlpath, t, MAX_URL);
     	    break;
     	case URI_WHITESPACE_CHOP:
    @@ -385,26 +404,83 @@ urlCanonicalClean(const request_t * request)
     	    break;
     	}
         }
    -    if (stringHasWhitespace(buf))
    -	xstrncpy(buf, rfc1738_escape(buf), MAX_URL);
    +    if (stringHasCntl(buf))
    +	xstrncpy(buf, rfc1738_escape_unescaped(buf), MAX_URL);
         return buf;
     }
     
    +/*
    + * matchDomainName() compares a hostname with a domainname according
    + * to the following rules:
    + * 
    + *    HOST          DOMAIN        MATCH?
    + * ------------- -------------    ------
    + *    foo.com       foo.com         YES
    + *   .foo.com       foo.com         YES
    + *  x.foo.com       foo.com          NO
    + *    foo.com      .foo.com         YES
    + *   .foo.com      .foo.com         YES
    + *  x.foo.com      .foo.com         YES
    + *
    + *  We strip leading dots on hosts (but not domains!) so that
    + *  ".foo.com" is is always the same as "foo.com".
    + *
    + *  Return values:
    + *     0 means the host matches the domain
    + *     1 means the host is greater than the domain
    + *    -1 means the host is less than the domain
    + */
    +
     int
    -matchDomainName(const char *domain, const char *host)
    +matchDomainName(const char *h, const char *d)
     {
    -    int offset;
    -    if ((offset = strlen(host) - strlen(domain)) < 0)
    -	return 0;		/* host too short */
    -    if (strcasecmp(domain, host + offset) != 0)
    -	return 0;		/* no match at all */
    -    if (*domain == '.')
    -	return 1;
    -    if (offset == 0)
    -	return 1;
    -    if (*(host + offset - 1) == '.')
    -	return 1;
    -    return 0;
    +    int dl;
    +    int hl;
    +    while ('.' == *h)
    +	h++;
    +    hl = strlen(h);
    +    dl = strlen(d);
    +    /*
    +     * Start at the ends of the two strings and work towards the
    +     * beginning.
    +     */
    +    while (xtolower(h[--hl]) == xtolower(d[--dl])) {
    +	if (hl == 0 && dl == 0) {
    +	    /*
    +	     * We made it all the way to the beginning of both
    +	     * strings without finding any difference.
    +	     */
    +	    return 0;
    +	}
    +	if (0 == hl) {
    +	    /* 
    +	     * The host string is shorter than the domain string.
    +	     * There is only one case when this can be a match.
    +	     * If the domain is just one character longer, and if
    +	     * that character is a leading '.' then we call it a
    +	     * match.
    +	     */
    +	    if (1 == dl && '.' == d[0])
    +		return 0;
    +	    else
    +		return -1;
    +	}
    +	if (0 == dl) {
    +	    /*
    +	     * The domain string is shorter than the host string.
    +	     * This is a match only if the first domain character
    +	     * is a leading '.'.
    +	     */
    +	    if ('.' == d[0])
    +		return 0;
    +	    else
    +		return 1;
    +	}
    +    }
    +    /*
    +     * We found different characters in the same position (from the end).
    +     */
    +    return (xtolower(h[hl]) - xtolower(d[dl]));
     }
     
     int
    @@ -422,7 +498,6 @@ urlCheckRequest(const request_t * r)
         switch (r->protocol) {
         case PROTO_URN:
         case PROTO_HTTP:
    -    case PROTO_HTTPS:
         case PROTO_CACHEOBJ:
     	rc = 1;
     	break;
    @@ -437,6 +512,13 @@ urlCheckRequest(const request_t * r)
     	else if (r->method == METHOD_HEAD)
     	    rc = 1;
     	break;
    +    case PROTO_HTTPS:
    +	/*
    +	 * Squid can't originate an SSL connection, so it should
    +	 * never receive an "https:" URL.  It should always be
    +	 * CONNECT instead.
    +	 */
    +	rc = 0;
         default:
     	break;
         }
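
The rewritten matchDomainName() documents host-vs-domain matching in the table above and
returns a full ordering (negative, zero, positive) instead of a boolean; presumably the
ordering exists so domain ACL lookups can use the function as a comparator, though that
wiring is not part of this patch. A small illustrative caller that relies only on the
documented return values:

    extern int matchDomainName(const char *host, const char *domain);  /* above */

    /* Linear scan over an ACL-style domain list; returns 1 on a match.
     * Squid's real dstdomain ACLs use a splay tree, so this loop is
     * only meant to show how the zero return value is consumed. */
    static int
    host_in_domain_list(const char *host, const char **domains, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            if (0 == matchDomainName(host, domains[i]))
                return 1;
        return 0;
    }
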
    diff --git a/src/urn.cc b/src/urn.cc
    index d44e65e356..98588f7a30 100644
    --- a/src/urn.cc
    +++ b/src/urn.cc
    @@ -1,7 +1,7 @@
     
     /*
      *
    - * $Id: urn.cc,v 1.55 1999/05/04 21:58:46 wessels Exp $
    + * $Id: urn.cc,v 1.56 1999/10/04 05:05:39 wessels Exp $
      *
      * DEBUG: section 52    URN Parsing
      * AUTHOR: Kostas Anagnostakis
    @@ -217,7 +217,7 @@ urnHandleReply(void *data, char *buf, ssize_t size)
         }
         s = buf + k;
         assert(urlres_e->mem_obj->reply);
    -    httpReplyParse(urlres_e->mem_obj->reply, buf);
    +    httpReplyParse(urlres_e->mem_obj->reply, buf, k);
         debug(52, 3) ("mem->reply exists, code=%d.\n",
     	urlres_e->mem_obj->reply->sline.status);
         if (urlres_e->mem_obj->reply->sline.status != HTTP_OK) {
    diff --git a/src/wccp.cc b/src/wccp.cc
    index 57d15a2b27..428b6d0839 100644
    --- a/src/wccp.cc
    +++ b/src/wccp.cc
    @@ -1,6 +1,6 @@
     
     /*
    - * $Id: wccp.cc,v 1.9 1999/08/02 06:18:51 wessels Exp $
    + * $Id: wccp.cc,v 1.10 1999/10/04 05:05:40 wessels Exp $
      *
      * DEBUG: section 80     WCCP Support
      * AUTHOR: Glenn Chisholm
    @@ -277,8 +277,7 @@ static void
     wccpAssignBuckets(void *voidnotused)
     {
         struct wccp_assign_bucket_t wccp_assign_bucket;
    -    int number_buckets;
    -    int loop_buckets;
    +    int buckets_per_cache;
         int loop;
         int number_caches;
         int bucket = 0;
    @@ -296,12 +295,14 @@ wccpAssignBuckets(void *voidnotused)
     	number_caches = WCCP_ACTIVE_CACHES;
         caches = xmalloc(sizeof(int) * number_caches);
     
    -    number_buckets = WCCP_BUCKETS / number_caches;
    +    buckets_per_cache = WCCP_BUCKETS / number_caches;
         for (loop = 0; loop < number_caches; loop++) {
    +	int i;
     	xmemcpy(&caches[loop],
     	    &wccp_i_see_you.wccp_cache_entry[loop].ip_addr.s_addr,
     	    sizeof(*caches));
    -	for (loop_buckets = 0; loop_buckets < number_buckets; loop_buckets++) {
    +	for (i = 0; i < buckets_per_cache; i++) {
    +	    assert(bucket < WCCP_BUCKETS);
     	    buckets[bucket++] = loop;
     	}
         }