From: wessels <>
Date: Tue, 7 Apr 1998 04:32:05 +0000 (+0000)
Subject: gindent
X-Git-Tag: SQUID_3_0_PRE1~3614
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b644367b74217c80e27df07d1c78fc395f936566;p=thirdparty%2Fsquid.git

gindent
---

diff --git a/src/CacheDigest.cc b/src/CacheDigest.cc
index 68e4762a0c..a64f7e964c 100644
--- a/src/CacheDigest.cc
+++ b/src/CacheDigest.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: CacheDigest.cc,v 1.9 1998/04/04 07:47:53 rousskov Exp $
+ * $Id: CacheDigest.cc,v 1.10 1998/04/06 22:32:05 wessels Exp $
  *
  * DEBUG: section 70    Cache Digest
  * AUTHOR: Alex Rousskov
@@ -33,14 +33,14 @@
 /* local types */
 typedef struct {
-    int bit_count;		/* total number of bits */
-    int bit_on_count;		/* #bits turned on */
-    int bseq_len_sum;		/* sum of all bit seq length */
-    int bseq_count;		/* number of bit seqs */
+    int bit_count;		/* total number of bits */
+    int bit_on_count;		/* #bits turned on */
+    int bseq_len_sum;		/* sum of all bit seq length */
+    int bseq_count;		/* number of bit seqs */
 } CacheDigestStats;

 /* local functions */
-static void cacheDigestHashKey(int bit_count, const cache_key *key);
+static void cacheDigestHashKey(int bit_count, const cache_key * key);

 /* configuration params */
 static const int BitsPerEntry = 4;
@@ -127,7 +127,7 @@ cacheDigestDel(CacheDigest * cd, const cache_key * key)

 /* returns mask utilization parameters */
 static void
-cacheDigestStats(const CacheDigest * cd, CacheDigestStats *stats)
+cacheDigestStats(const CacheDigest * cd, CacheDigestStats * stats)
 {
     const int bit_count = cd->capacity * BitsPerEntry;
     int on_count = 0;
@@ -157,34 +157,34 @@ cacheDigestStats(const CacheDigest * cd, CacheDigestStats *stats)
 }

 void
-cacheDigestReport(CacheDigest *cd, const char *label, StoreEntry * e)
+cacheDigestReport(CacheDigest * cd, const char *label, StoreEntry * e)
 {
     CacheDigestStats stats;
     assert(cd && e);
     cacheDigestStats(cd, &stats);
     storeAppendPrintf(e, "%s digest: size: %d bytes\n",
-	label ? label : "", stats.bit_count/8
-	);
+	label ? label : "", stats.bit_count / 8
+	);
     storeAppendPrintf(e, "\t entries: count: %d capacity: %d util: %d%%\n",
 	cd->count,
 	cd->capacity,
 	xpercentInt(cd->count, cd->capacity)
-	);
-    storeAppendPrintf(e, "\t deletion attempts: %d\n",
+	);
+    storeAppendPrintf(e, "\t deletion attempts: %d\n",
 	cd->del_count
-	);
-    storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n",
+	);
+    storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n",
 	stats.bit_on_count, stats.bit_count,
 	xpercentInt(stats.bit_on_count, stats.bit_count)
-	);
-    storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n",
+	);
+    storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n",
 	stats.bseq_count,
 	xdiv(stats.bseq_len_sum, stats.bseq_count)
-	);
+	);
 }

 static void
-cacheDigestHashKey(int bit_count, const cache_key *key)
+cacheDigestHashKey(int bit_count, const cache_key * key)
 {
     /* get four hashed values */
     memcpy(hashed_keys, key, sizeof(hashed_keys));
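
The CacheDigest reindented above is, at heart, a Bloom filter: a bit mask sized from the
entry capacity (BitsPerEntry = 4), with several hash-derived bit positions per key, and the
on/off utilization reported by cacheDigestStats(). The following is a minimal standalone
sketch of that idea; the ToyDigest type and toy_hash() are hypothetical stand-ins for
Squid's types and its MD5-based cacheDigestHashKey(), not the actual implementation.

#include <stdlib.h>

typedef struct {
    unsigned char *mask;	/* the bit vector (the "digest") */
    int bit_count;		/* total number of bits */
} ToyDigest;

static ToyDigest *
toy_digest_create(int capacity)
{
    ToyDigest *d = calloc(1, sizeof(*d));
    d->bit_count = capacity * 4;	/* cf. BitsPerEntry = 4 above */
    d->mask = calloc((d->bit_count + 7) / 8, 1);
    return d;
}

static unsigned int
toy_hash(const char *key, unsigned int seed)
{
    /* simplified stand-in; Squid derives four values from the MD5 store key */
    unsigned int h = seed * 2654435761u + 1;
    while (*key)
	h = h * 31 + (unsigned char) *key++;
    return h;
}

static void
toy_digest_add(ToyDigest * d, const char *key)
{
    unsigned int i;
    for (i = 0; i < 4; i++) {	/* four bits per entry */
	unsigned int bit = toy_hash(key, i) % d->bit_count;
	d->mask[bit >> 3] |= 1 << (bit & 7);
    }
}

static int
toy_digest_test(const ToyDigest * d, const char *key)
{
    unsigned int i;
    for (i = 0; i < 4; i++) {
	unsigned int bit = toy_hash(key, i) % d->bit_count;
	if (!(d->mask[bit >> 3] & (1 << (bit & 7))))
	    return 0;		/* definitely not present */
    }
    return 1;			/* probably present; false hits possible */
}

A lookup that finds any of the four bits clear is a definite miss; all bits set means only
"probably cached", which is why the report above tracks bit utilization: the more bits are
turned on, the higher the false-hit rate.
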
label : "", stats.bit_count / 8 + ); storeAppendPrintf(e, "\t entries: count: %d capacity: %d util: %d%%\n", cd->count, cd->capacity, xpercentInt(cd->count, cd->capacity) - ); - storeAppendPrintf(e, "\t deletion attempts: %d\n", + ); + storeAppendPrintf(e, "\t deletion attempts: %d\n", cd->del_count - ); - storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n", + ); + storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n", stats.bit_on_count, stats.bit_count, xpercentInt(stats.bit_on_count, stats.bit_count) - ); - storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n", + ); + storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n", stats.bseq_count, xdiv(stats.bseq_len_sum, stats.bseq_count) - ); + ); } static void -cacheDigestHashKey(int bit_count, const cache_key *key) +cacheDigestHashKey(int bit_count, const cache_key * key) { /* get four hashed values */ memcpy(hashed_keys, key, sizeof(hashed_keys)); diff --git a/src/HttpHdrContRange.cc b/src/HttpHdrContRange.cc index 01706534ea..30208f15ce 100644 --- a/src/HttpHdrContRange.cc +++ b/src/HttpHdrContRange.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpHdrContRange.cc,v 1.3 1998/03/11 22:18:44 rousskov Exp $ + * $Id: HttpHdrContRange.cc,v 1.4 1998/04/06 22:32:06 wessels Exp $ * * DEBUG: section 68 HTTP Content-Range Header * AUTHOR: Alex Rousskov @@ -31,17 +31,17 @@ #include "squid.h" -#if 0 - Currently only byte ranges are supported - - Content-Range = "Content-Range" ":" content-range-spec - content-range-spec = byte-content-range-spec - byte-content-range-spec = bytes-unit SP - ( byte-range-resp-spec | "*") "/" - ( entity-length | "*" ) - byte-range-resp-spec = first-byte-pos "-" last-byte-pos - entity-length = 1*DIGIT -#endif +/* + * Currently only byte ranges are supported + * + * Content-Range = "Content-Range" ":" content-range-spec + * content-range-spec = byte-content-range-spec + * byte-content-range-spec = bytes-unit SP + * ( byte-range-resp-spec | "*") "/" + * ( entity-length | "*" ) + * byte-range-resp-spec = first-byte-pos "-" last-byte-pos + * entity-length = 1*DIGIT + */ /* local constants */ @@ -56,7 +56,7 @@ /* parses range-resp-spec and inits spec, returns true on success */ static int -httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec *spec, const char *field, int flen) +httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec * spec, const char *field, int flen) { const char *p; assert(spec); @@ -67,20 +67,20 @@ httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec *spec, const char *field, int fle if (*field == '*') return 1; /* check format, must be %d-%d */ - if (!((p = strchr(field, '-')) && (p-field < flen))) { + if (!((p = strchr(field, '-')) && (p - field < flen))) { debug(68, 2) ("invalid (no '-') resp-range-spec near: '%s'\n", field); return 0; } /* parse offset */ if (!httpHeaderParseSize(field, &spec->offset)) - return 0; + return 0; p++; /* do we have last-pos ? 
diff --git a/src/HttpHdrExtField.cc b/src/HttpHdrExtField.cc
index cf637511c0..10ae40ee01 100644
--- a/src/HttpHdrExtField.cc
+++ b/src/HttpHdrExtField.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: HttpHdrExtField.cc,v 1.2 1998/03/08 21:02:07 rousskov Exp $
+ * $Id: HttpHdrExtField.cc,v 1.3 1998/04/06 22:32:06 wessels Exp $
  *
  * DEBUG: section 69    HTTP Header: Extension Field
  * AUTHOR: Alex Rousskov
@@ -38,7 +38,7 @@ static HttpHdrExtField *httpHdrExtFieldDoCreate(const char *name, int name_len,

 /* implementation */
 static HttpHdrExtField *
-httpHdrExtFieldDoCreate(const char *name, int name_len,
+httpHdrExtFieldDoCreate(const char *name, int name_len,
     const char *value, int value_len)
 {
     HttpHdrExtField *f = xcalloc(1, sizeof(HttpHdrExtField));
@@ -51,7 +51,7 @@ HttpHdrExtField *
 httpHdrExtFieldCreate(const char *name, const char *value)
 {
     return httpHdrExtFieldDoCreate(
-	name, strlen(name),
+	name, strlen(name),
	value, strlen(value));
 }

@@ -80,7 +80,7 @@ httpHdrExtFieldParseCreate(const char *field_start, const char *field_end)
     }
     return httpHdrExtFieldDoCreate(
	field_start, name_end - field_start,
-	value_start, field_end - value_start);
+	value_start, field_end - value_start);
 }

 void
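
Extension fields are headers Squid does not recognize; the code above keeps them verbatim
as counted name/value strings, split at the first ':'. A standalone sketch of that split,
assuming the value may carry leading whitespace; the ExtField type and helpers here are
hypothetical, not Squid's:

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char *name;
    int name_len;
    char *value;
    int value_len;
} ExtField;

static char *
dup_range(const char *start, int len)
{
    char *s = malloc(len + 1);
    memcpy(s, start, len);
    s[len] = '\0';
    return s;
}

static ExtField *
ext_field_parse(const char *field_start, const char *field_end)
{
    ExtField *f;
    const char *name_end = memchr(field_start, ':', field_end - field_start);
    const char *value_start;
    if (!name_end || name_end == field_start)
	return NULL;		/* empty name: not a valid field */
    value_start = name_end + 1;
    while (value_start < field_end && isspace((unsigned char) *value_start))
	value_start++;		/* skip whitespace before the value */
    f = calloc(1, sizeof(*f));
    f->name_len = name_end - field_start;
    f->name = dup_range(field_start, f->name_len);
    f->value_len = field_end - value_start;
    f->value = dup_range(value_start, f->value_len);
    return f;
}
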
diff --git a/src/HttpHdrRange.cc b/src/HttpHdrRange.cc
index b341dbbe4d..1940445857 100644
--- a/src/HttpHdrRange.cc
+++ b/src/HttpHdrRange.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: HttpHdrRange.cc,v 1.5 1998/03/11 21:11:47 rousskov Exp $
+ * $Id: HttpHdrRange.cc,v 1.6 1998/04/06 22:32:07 wessels Exp $
  *
  * DEBUG: section 64    HTTP Range Header
  * AUTHOR: Alex Rousskov
@@ -31,24 +31,23 @@

 #include "squid.h"

-#if 0
-    Currently only byte ranges are supported
-
-    Essentially, there are three types of byte ranges:
-
-    1) first-byte-pos "-" last-byte-pos  // range
-    2) first-byte-pos "-"                // trailer
-    3) "-" suffix-length                 // suffix (last length bytes)
-
-
-    When Range field is parsed, we have no clue about the content length of
-    the document. Thus, we simply code an "absent" part using range_spec_unknown
-    constant.
-
-    Note: when response length becomes known, we convert any range spec into
-    type one above. (Canonization process).
-
-#endif
+/*
+ * Currently only byte ranges are supported
+ *
+ * Essentially, there are three types of byte ranges:
+ *
+ * 1) first-byte-pos "-" last-byte-pos  // range
+ * 2) first-byte-pos "-"                // trailer
+ * 3) "-" suffix-length                 // suffix (last length bytes)
+ *
+ *
+ * When Range field is parsed, we have no clue about the content
+ * length of the document. Thus, we simply code an "absent" part
+ * using range_spec_unknown constant.
+ *
+ * Note: when response length becomes known, we convert any range
+ * spec into type one above. (Canonization process).
+ */

 /* local constants */

@@ -58,9 +57,9 @@
 #define known_spec(s) ((s) != range_spec_unknown)
 #define size_min(a,b) ((a) <= (b) ? (a) : (b))
 #define size_diff(a,b) ((a) >= (b) ? ((a)-(b)) : 0)
-static HttpHdrRangeSpec *httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec);
-static int httpHdrRangeSpecCanonize(HttpHdrRangeSpec *spec, size_t clen);
-static void httpHdrRangeSpecPackInto(const HttpHdrRangeSpec *spec, Packer *p);
+static HttpHdrRangeSpec *httpHdrRangeSpecDup(const HttpHdrRangeSpec * spec);
+static int httpHdrRangeSpecCanonize(HttpHdrRangeSpec * spec, size_t clen);
+static void httpHdrRangeSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p);

 /* globals */
 static int RangeParsedCount = 0;
@@ -79,17 +78,18 @@ httpHdrRangeSpecCreate()
 static HttpHdrRangeSpec *
 httpHdrRangeSpecParseCreate(const char *field, int flen)
 {
-    HttpHdrRangeSpec spec = { range_spec_unknown, range_spec_unknown };
+    HttpHdrRangeSpec spec =
+    {range_spec_unknown, range_spec_unknown};
     const char *p;
     if (flen < 2)
	return NULL;
     /* is it a suffix-byte-range-spec ? */
     if (*field == '-') {
-	if (!httpHeaderParseSize(field+1, &spec.length))
+	if (!httpHeaderParseSize(field + 1, &spec.length))
	    return NULL;
     } else
-	/* must have a '-' somewhere in _this_ field */
-    if (!((p = strchr(field, '-')) || (p-field >= flen))) {
+	/* must have a '-' somewhere in _this_ field */
+    if (!((p = strchr(field, '-')) || (p - field >= flen))) {
	debug(64, 2) ("ignoring invalid (missing '-') range-spec near: '%s'\n", field);
	return NULL;
     } else {
@@ -100,8 +100,8 @@ httpHdrRangeSpecParseCreate(const char *field, int flen)
	if (p - field < flen) {
	    size_t last_pos;
	    if (!httpHeaderParseSize(p, &last_pos))
-		return NULL;
-	    spec.length = size_diff(last_pos+1, spec.offset);
+		return NULL;
+	    spec.length = size_diff(last_pos + 1, spec.offset);
	}
     }
     /* we managed to parse, check if the result makes sence */
     if (known_spec(spec.length) && !spec.length) {
@@ -113,14 +113,14 @@

 static void
-httpHdrRangeSpecDestroy(HttpHdrRangeSpec *spec)
+httpHdrRangeSpecDestroy(HttpHdrRangeSpec * spec)
 {
     memFree(MEM_HTTP_HDR_RANGE_SPEC, spec);
 }

 static HttpHdrRangeSpec *
-httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec)
+httpHdrRangeSpecDup(const HttpHdrRangeSpec * spec)
 {
     HttpHdrRangeSpec *dup = httpHdrRangeSpecCreate();
     dup->offset = spec->offset;
@@ -129,29 +129,27 @@ httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec)
 }

 static void
-httpHdrRangeSpecPackInto(const HttpHdrRangeSpec *spec, Packer *p)
+httpHdrRangeSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p)
 {
-    if (!known_spec(spec->offset))	/* suffix */
+    if (!known_spec(spec->offset))	/* suffix */
	packerPrintf(p, "-%d", spec->length);
-    else
-    if (!known_spec(spec->length))	/* trailer */
+    else if (!known_spec(spec->length))	/* trailer */
	packerPrintf(p, "%d-", spec->offset);
-    else				/* range */
-	packerPrintf(p, "%d-%d",
-	    spec->offset, spec->offset+spec->length-1);
+    else				/* range */
+	packerPrintf(p, "%d-%d",
+	    spec->offset, spec->offset + spec->length - 1);
 }

 /* fills "absent" positions in range specification based on response body size
-   returns true if the range is still valid
-   range is valid if its intersection with [0,length-1] is not empty
-*/
+ * returns true if the range is still valid
+ * range is valid if its intersection with [0,length-1] is not empty
+ */
 static int
-httpHdrRangeSpecCanonize(HttpHdrRangeSpec *spec, size_t clen)
+httpHdrRangeSpecCanonize(HttpHdrRangeSpec * spec, size_t clen)
 {
-    if (!known_spec(spec->offset))	/* suffix */
+    if (!known_spec(spec->offset))	/* suffix */
	spec->offset = size_diff(clen, spec->length);
-    else
-    if (!known_spec(spec->length))	/* trailer */
+    else if (!known_spec(spec->length))	/* trailer */
	spec->length = size_diff(clen, spec->offset);
     /* we have a "range" now, adjust length if needed */
     assert(known_spec(spec->length));
@@ -186,7 +184,7 @@ httpHdrRangeParseCreate(const char *str)

 /* returns true if ranges are valid; inits HttpHdrRange */
 int
-httpHdrRangeParseInit(HttpHdrRange *range, const char *str)
+httpHdrRangeParseInit(HttpHdrRange * range, const char *str)
 {
     const char *item;
     const char *pos = NULL;
@@ -213,7 +211,7 @@ httpHdrRangeParseInit(HttpHdrRange *range, const char *str)
 }

 void
-httpHdrRangeDestroy(HttpHdrRange *range)
+httpHdrRangeDestroy(HttpHdrRange * range)
 {
     assert(range);
     while (range->specs.count)
@@ -257,25 +255,25 @@ httpHdrRangePackInto(const HttpHdrRange * range, Packer * p)
  * - there is at least one range spec
  */
 int
-httpHdrRangeCanonize(HttpHdrRange *range, size_t clen)
+httpHdrRangeCanonize(HttpHdrRange * range, size_t clen)
 {
     int i;
     assert(range);
     for (i = 0; i < range->specs.count; i++)
-	if (!httpHdrRangeSpecCanonize(range->specs.items[i], clen))
+	if (!httpHdrRangeSpecCanonize(range->specs.items[i], clen))
	    return 0;
     return range->specs.count;
 }

 /* searches for next range, returns true if found */
 int
-httpHdrRangeGetSpec(const HttpHdrRange *range, HttpHdrRangeSpec *spec, int *pos)
+httpHdrRangeGetSpec(const HttpHdrRange * range, HttpHdrRangeSpec * spec, int *pos)
 {
     assert(range && spec);
     assert(pos && *pos >= -1 && *pos < range->specs.count);
     (*pos)++;
     if (*pos < range->specs.count) {
-	*spec = *(HttpHdrRangeSpec*)range->specs.items[*pos];
+	*spec = *(HttpHdrRangeSpec *) range->specs.items[*pos];
	return 1;
     }
     spec->offset = spec->length = 0;
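
The comment block reformatted above describes canonization: once the entity length is
known, suffix ("-N") and trailer ("N-") specs are rewritten as plain offset/length ranges
and clipped to the entity. A standalone sketch of that rule, mirroring the
size_diff()/size_min() logic without Squid's types (ToyRangeSpec is hypothetical):

typedef struct {
    long offset;		/* -1 when unknown (suffix spec: "-N") */
    long length;		/* -1 when unknown (trailer spec: "N-") */
} ToyRangeSpec;

static int
toy_range_canonize(ToyRangeSpec * spec, long clen)
{
    if (spec->offset < 0)	/* suffix: the last `length` bytes */
	spec->offset = clen > spec->length ? clen - spec->length : 0;
    else if (spec->length < 0)	/* trailer: from offset to the end */
	spec->length = clen > spec->offset ? clen - spec->offset : 0;
    /* clip so the range stays inside [0, clen - 1] */
    if (spec->length > clen - spec->offset)
	spec->length = clen > spec->offset ? clen - spec->offset : 0;
    return spec->length > 0;	/* valid iff the intersection is non-empty */
}

For example, a suffix spec "-500" against a 10000-byte entity canonizes to offset 9500,
length 500; a trailer "9800-" canonizes to offset 9800, length 200.
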
diff --git a/src/HttpHeader.cc b/src/HttpHeader.cc
index e1576878ea..5a132702ce 100644
--- a/src/HttpHeader.cc
+++ b/src/HttpHeader.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: HttpHeader.cc,v 1.30 1998/04/05 20:32:44 wessels Exp $
+ * $Id: HttpHeader.cc,v 1.31 1998/04/06 22:32:07 wessels Exp $
  *
  * DEBUG: section 55    HTTP Header
  * AUTHOR: Alex Rousskov
@@ -103,7 +103,7 @@ static const HttpHeaderFieldAttrs HeadersAttrs[] =
     {"Last-Modified", HDR_LAST_MODIFIED, ftDate_1123},
     {"Location", HDR_LOCATION, ftStr},
     {"Max-Forwards", HDR_MAX_FORWARDS, ftInt},
-    {"Mime-Version", HDR_MIME_VERSION, ftStr},	/* for now */
+    {"Mime-Version", HDR_MIME_VERSION, ftStr},	/* for now */
     {"Proxy-Authenticate", HDR_PROXY_AUTHENTICATE, ftStr},
     {"Proxy-Connection", HDR_PROXY_CONNECTION, ftStr},
     {"Public", HDR_PUBLIC, ftStr},
@@ -123,7 +123,7 @@ static HttpHeaderFieldInfo *Headers = NULL;
  * headers with field values defined as #(values) in HTTP/1.1
  * Headers that are currently not recognized, are commented out.
  */
-static HttpHeaderMask ListHeadersMask;	/* set run-time using ListHeadersArr */
+static HttpHeaderMask ListHeadersMask;	/* set run-time using ListHeadersArr */
 static http_hdr_type ListHeadersArr[] =
 {
     HDR_ACCEPT,
@@ -141,7 +141,7 @@ static http_hdr_type ListHeadersArr[] =
     /* HDR_EXPECT, HDR_TE, HDR_TRAILER */
 };
-static HttpHeaderMask ReplyHeadersMask;	/* set run-time using ReplyHeaders */
+static HttpHeaderMask ReplyHeadersMask;	/* set run-time using ReplyHeaders */
 static http_hdr_type ReplyHeadersArr[] =
 {
     HDR_ACCEPT, HDR_ACCEPT_CHARSET, HDR_ACCEPT_ENCODING, HDR_ACCEPT_LANGUAGE,
@@ -152,7 +152,7 @@ static http_hdr_type ReplyHeadersArr[] =
     HDR_UPGRADE, HDR_WARNING, HDR_PROXY_CONNECTION, HDR_X_CACHE,
     HDR_OTHER
 };
-static HttpHeaderMask RequestHeadersMask;	/* set run-time using RequestHeaders */
+static HttpHeaderMask RequestHeadersMask;	/* set run-time using RequestHeaders */
 static http_hdr_type RequestHeadersArr[] =
 {
     HDR_RANGE, HDR_OTHER
@@ -183,7 +183,7 @@ static HttpHeaderEntry *httpHeaderGetEntry(const HttpHeader * hdr, HttpHeaderPos
 static void httpHeaderDelAt(HttpHeader * hdr, HttpHeaderPos pos);
 /* static int httpHeaderDelById(HttpHeader * hdr, http_hdr_type id); */
 static void httpHeaderAddEntry(HttpHeader * hdr, HttpHeaderEntry * e);
-static String httpHeaderJoinEntries(const HttpHeader *hdr, http_hdr_type id);
+static String httpHeaderJoinEntries(const HttpHeader * hdr, http_hdr_type id);
 static HttpHeaderEntry *httpHeaderEntryCreate(http_hdr_type id, const char *name, const char *value);
 static void httpHeaderEntryDestroy(HttpHeaderEntry * e);
@@ -206,7 +206,7 @@ httpHeaderInitModule()
 {
     int i;
     /* check that we have enough space for masks */
-    assert(8*sizeof(HttpHeaderMask) >= HDR_ENUM_END);
+    assert(8 * sizeof(HttpHeaderMask) >= HDR_ENUM_END);
     Headers = httpHeaderBuildFieldsInfo(HeadersAttrs,
HDR_ENUM_END); /* create masks */ httpHeaderCalcMask(&ListHeadersMask, (const int *) ListHeadersArr, countof(ListHeadersArr)); @@ -272,16 +272,16 @@ httpHeaderClean(HttpHeader * hdr) debug(55, 0) ("httpHeaderClean BUG: entry[%d] is invalid (%d). Ignored.\n", pos, e->id); else - /* end of hack */ - /* yes, this destroy() leaves us in an incosistent state */ - httpHeaderEntryDestroy(e); + /* end of hack */ + /* yes, this destroy() leaves us in an incosistent state */ + httpHeaderEntryDestroy(e); } arrayClean(&hdr->entries); } /* use fresh entries to replace old ones */ void -httpHeaderUpdate(HttpHeader *old, const HttpHeader *fresh) +httpHeaderUpdate(HttpHeader * old, const HttpHeader * fresh) { HttpHeaderEntry *e; HttpHeaderEntry *e_clone; @@ -321,7 +321,7 @@ httpHeaderParse(HttpHeader * hdr, const char *header_start, const char *header_e const char *field_end = field_start + strcspn(field_start, "\r\n"); if (!*field_end || field_end > header_end) return httpHeaderReset(hdr); /* missing */ - e = httpHeaderEntryParseCreate(field_start, field_end); + e = httpHeaderEntryParseCreate(field_start, field_end); if (e != NULL) httpHeaderAddEntry(hdr, e); else @@ -334,7 +334,7 @@ httpHeaderParse(HttpHeader * hdr, const char *header_start, const char *header_e if (*field_start == '\n') field_start++; } - return 1; /* even if no fields where found, it is a valid header */ + return 1; /* even if no fields where found, it is a valid header */ } /* @@ -362,7 +362,7 @@ httpHeaderGetEntry(const HttpHeader * hdr, HttpHeaderPos * pos) debug(55, 8) ("searching for next e in hdr %p from %d\n", hdr, *pos); for ((*pos)++; *pos < hdr->entries.count; (*pos)++) { if (hdr->entries.items[*pos]) - return hdr->entries.items[*pos]; + return hdr->entries.items[*pos]; } debug(55, 8) ("no more entries in hdr %p\n", hdr); return NULL; @@ -393,7 +393,7 @@ httpHeaderFindEntry(const HttpHeader * hdr, http_hdr_type id) } /* hm.. we thought it was there, but it was not found */ assert(0); - return NULL; /* not reached */ + return NULL; /* not reached */ } /* @@ -418,7 +418,7 @@ httpHeaderFindLastEntry(const HttpHeader * hdr, http_hdr_type id) if (e->id == id) result = e; } - assert(result); /* must be there! */ + assert(result); /* must be there! 
*/ return result; } @@ -431,7 +431,7 @@ httpHeaderDelByName(HttpHeader * hdr, const char *name) int count = 0; HttpHeaderPos pos = HttpHeaderInitPos; HttpHeaderEntry *e; - httpHeaderMaskInit(&hdr->mask); /* temporal inconsistency */ + httpHeaderMaskInit(&hdr->mask); /* temporal inconsistency */ debug(55, 7) ("deleting '%s' fields in hdr %p\n", name, hdr); while ((e = httpHeaderGetEntry(hdr, &pos))) { if (!strCaseCmp(e->name, name)) { @@ -497,7 +497,7 @@ httpHeaderAddEntry(HttpHeader * hdr, HttpHeaderEntry * e) } static String -httpHeaderJoinEntries(const HttpHeader *hdr, http_hdr_type id) +httpHeaderJoinEntries(const HttpHeader * hdr, http_hdr_type id) { String s = StringNull; HttpHeaderEntry *e; @@ -534,7 +534,7 @@ void httpHeaderPutInt(HttpHeader * hdr, http_hdr_type id, int number) { assert_eid(id); - assert(Headers[id].type == ftInt); /* must be of an appropriatre type */ + assert(Headers[id].type == ftInt); /* must be of an appropriatre type */ assert(number >= 0); httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, xitoa(number))); } @@ -543,7 +543,7 @@ void httpHeaderPutTime(HttpHeader * hdr, http_hdr_type id, time_t time) { assert_eid(id); - assert(Headers[id].type == ftDate_1123); /* must be of an appropriatre type */ + assert(Headers[id].type == ftDate_1123); /* must be of an appropriatre type */ assert(time >= 0); httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, mkrfc1123(time))); } @@ -552,7 +552,7 @@ void httpHeaderPutStr(HttpHeader * hdr, http_hdr_type id, const char *str) { assert_eid(id); - assert(Headers[id].type == ftStr); /* must be of an appropriatre type */ + assert(Headers[id].type == ftStr); /* must be of an appropriatre type */ assert(str); httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, str)); } @@ -614,7 +614,7 @@ httpHeaderGetStr(const HttpHeader * hdr, http_hdr_type id) assert_eid(id); assert(Headers[id].type == ftStr); /* must be of an appropriate type */ if ((e = httpHeaderFindEntry(hdr, id))) { - httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */ + httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */ return strBuf(e->value); } return NULL; @@ -628,7 +628,7 @@ httpHeaderGetLastStr(const HttpHeader * hdr, http_hdr_type id) assert_eid(id); assert(Headers[id].type == ftStr); /* must be of an appropriate type */ if ((e = httpHeaderFindLastEntry(hdr, id))) { - httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */ + httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */ return strBuf(e->value); } return NULL; @@ -739,7 +739,7 @@ httpHeaderEntryParseCreate(const char *field_start, const char *field_end) /* note: name_start == field_start */ const char *name_end = strchr(field_start, ':'); const int name_len = name_end ? 
name_end - field_start : 0; - const char *value_start = field_start + name_len + 1; /* skip ':' */ + const char *value_start = field_start + name_len + 1; /* skip ':' */ /* note: value_end == field_end */ HeaderEntryParsedCount++; @@ -794,7 +794,7 @@ httpHeaderNoteParsedEntry(http_hdr_type id, String context, int error) Headers[id].stat.parsCount++; if (error) { Headers[id].stat.errCount++; - debug(55,2) ("cannot parse hdr field: '%s: %s'\n", + debug(55, 2) ("cannot parse hdr field: '%s: %s'\n", strBuf(Headers[id].name), strBuf(context)); } } @@ -819,7 +819,7 @@ httpHeaderFldsPerHdrDumper(StoreEntry * sentry, int idx, double val, double size { if (count) storeAppendPrintf(sentry, "%2d\t %5d\t %5d\t %6.2f\n", - idx, (int)val, count, + idx, (int) val, count, xpercent(count, HeaderDestroyedCount)); } diff --git a/src/HttpHeaderTools.cc b/src/HttpHeaderTools.cc index e28585a364..ab65647785 100644 --- a/src/HttpHeaderTools.cc +++ b/src/HttpHeaderTools.cc @@ -1,5 +1,5 @@ /* - * $Id: HttpHeaderTools.cc,v 1.7 1998/04/02 05:35:22 rousskov Exp $ + * $Id: HttpHeaderTools.cc,v 1.8 1998/04/06 22:32:08 wessels Exp $ * * DEBUG: section 66 HTTP Header Tools * AUTHOR: Alex Rousskov @@ -34,7 +34,7 @@ static int httpHeaderStrCmp(const char *h1, const char *h2, int len); HttpHeaderFieldInfo * -httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs *attrs, int count) +httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs * attrs, int count) { int i; HttpHeaderFieldInfo *table = NULL; @@ -49,7 +49,7 @@ httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs *attrs, int count) /* sanity checks */ assert(id >= 0 && id < count); assert(attrs[i].name); - assert(info->id == 0 && info->type == 0); /* was not set before */ + assert(info->id == 0 && info->type == 0); /* was not set before */ /* copy and init fields */ info->id = id; info->type = attrs[i].type; @@ -62,7 +62,7 @@ httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs *attrs, int count) } void -httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo *table, int count) +httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo * table, int count) { int i; for (i = 0; i < count; ++i) @@ -71,14 +71,14 @@ httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo *table, int count) } void -httpHeaderMaskInit(HttpHeaderMask *mask) +httpHeaderMaskInit(HttpHeaderMask * mask) { memset(mask, 0, sizeof(*mask)); } /* calculates a bit mask of a given array */ void -httpHeaderCalcMask(HttpHeaderMask *mask, const int *enums, int count) +httpHeaderCalcMask(HttpHeaderMask * mask, const int *enums, int count) { int i; assert(mask && enums); @@ -93,15 +93,15 @@ httpHeaderCalcMask(HttpHeaderMask *mask, const int *enums, int count) int -httpHeaderIdByName(const char *name, int name_len, const HttpHeaderFieldInfo *info, int end) +httpHeaderIdByName(const char *name, int name_len, const HttpHeaderFieldInfo * info, int end) { int i; for (i = 0; i < end; ++i) { if (name_len >= 0 && name_len != strLen(info[i].name)) continue; if (!strncasecmp(name, strBuf(info[i].name), - name_len < 0 ? strLen(info[i].name) + 1 : name_len)) - return i; + name_len < 0 ? strLen(info[i].name) + 1 : name_len)) + return i; } return -1; } @@ -149,7 +149,7 @@ getStringPrefix(const char *str, const char *end) { #define SHORT_PREFIX_SIZE 512 LOCAL_ARRAY(char, buf, SHORT_PREFIX_SIZE); - const int sz = 1 + (end ? end-str : strlen(str)); + const int sz = 1 + (end ? end - str : strlen(str)); xstrncpy(buf, str, (sz > SHORT_PREFIX_SIZE) ? 
SHORT_PREFIX_SIZE : sz); return buf; } @@ -165,13 +165,13 @@ httpHeaderParseInt(const char *start, int *value) *value = atoi(start); if (!*value && !isdigit(*start)) { debug(66, 2) ("failed to parse an int header field near '%s'\n", start); - return 0; + return 0; } return 1; } int -httpHeaderParseSize(const char *start, size_t *value) +httpHeaderParseSize(const char *start, size_t * value) { int v; const int res = httpHeaderParseInt(start, &v); @@ -185,7 +185,8 @@ httpHeaderParseSize(const char *start, size_t *value) * parses a given string then packs compiled headers and compares the result * with the original, reports discrepancies */ -void httpHeaderTestParser(const char *hstr) +void +httpHeaderTestParser(const char *hstr) { static int bug_count = 0; int hstr_len; @@ -199,24 +200,23 @@ void httpHeaderTestParser(const char *hstr) if (!strncasecmp(hstr, "HTTP/", 5)) { const char *p = strchr(hstr, '\n'); if (p) - hstr = p+1; + hstr = p + 1; } /* skip invalid first line if any */ if (isspace(*hstr)) { const char *p = strchr(hstr, '\n'); if (p) - hstr = p+1; + hstr = p + 1; } hstr_len = strlen(hstr); /* skip terminator if any */ if (strstr(hstr, "\n\r\n")) hstr_len -= 2; - else - if (strstr(hstr, "\n\n")) + else if (strstr(hstr, "\n\n")) hstr_len -= 1; httpHeaderInit(&hdr); /* debugLevels[55] = 8; */ - parse_success = httpHeaderParse(&hdr, hstr, hstr+hstr_len); + parse_success = httpHeaderParse(&hdr, hstr, hstr + hstr_len); /* debugLevels[55] = 2; */ if (!parse_success) { debug(66, 2) ("TEST (%d): failed to parsed a header: {\n%s}\n", bug_count, hstr); @@ -229,7 +229,7 @@ void httpHeaderTestParser(const char *hstr) if ((pos = abs(httpHeaderStrCmp(hstr, mb.buf, hstr_len)))) { bug_count++; debug(66, 2) ("TEST (%d): hdr parsing bug (pos: %d near '%s'): expected: {\n%s} got: {\n%s}\n", - bug_count, pos, hstr+pos, hstr, mb.buf); + bug_count, pos, hstr + pos, hstr, mb.buf); } httpHeaderClean(&hdr); packerClean(&p); @@ -237,7 +237,7 @@ void httpHeaderTestParser(const char *hstr) } -/* like strncasecmp but ignores ws characters */ +/* like strncasecmp but ignores ws characters */ static int httpHeaderStrCmp(const char *h1, const char *h2, int len) { @@ -250,12 +250,16 @@ httpHeaderStrCmp(const char *h1, const char *h2, int len) while (1) { const char c1 = toupper(h1[len1 += xcountws(h1 + len1)]); const char c2 = toupper(h2[len2 += xcountws(h2 + len2)]); - if (c1 < c2) return -len1; - if (c1 > c2) return +len1; + if (c1 < c2) + return -len1; + if (c1 > c2) + return +len1; if (!c1 && !c2) return 0; - if (c1) len1++; - if (c2) len2++; + if (c1) + len1++; + if (c2) + len2++; } return 0; } diff --git a/src/HttpReply.cc b/src/HttpReply.cc index b6bc07b6ba..df0aa82b2e 100644 --- a/src/HttpReply.cc +++ b/src/HttpReply.cc @@ -1,6 +1,6 @@ /* - * $Id: HttpReply.cc,v 1.14 1998/04/05 20:32:44 wessels Exp $ + * $Id: HttpReply.cc,v 1.15 1998/04/06 22:32:09 wessels Exp $ * * DEBUG: section 58 HTTP Reply (Response) * AUTHOR: Alex Rousskov @@ -174,7 +174,8 @@ httpPackedReply(double ver, http_status status, const char *ctype, MemBuf httpPacked304Reply(const HttpReply * rep) { - static const http_hdr_type ImsEntries[] = { HDR_DATE, HDR_CONTENT_LENGTH, HDR_CONTENT_TYPE, HDR_EXPIRES, HDR_LAST_MODIFIED, /* eof */ HDR_OTHER }; + static const http_hdr_type ImsEntries[] = + {HDR_DATE, HDR_CONTENT_LENGTH, HDR_CONTENT_TYPE, HDR_EXPIRES, HDR_LAST_MODIFIED, /* eof */ HDR_OTHER}; http_hdr_type t; MemBuf mb; Packer p; @@ -271,17 +272,17 @@ httpReplyHdrCacheInit(HttpReply * rep) rep->content_range = httpHeaderGetContRange(hdr); 
str = httpHeaderGetStr(hdr, HDR_PROXY_CONNECTION); if (NULL == str) - str = httpHeaderGetStr(hdr, HDR_CONNECTION); /* @?@ FIX ME */ + str = httpHeaderGetStr(hdr, HDR_CONNECTION); /* @?@ FIX ME */ rep->proxy_keep_alive = str && 0 == strcasecmp(str, "Keep-Alive"); /* final adjustments */ /* The max-age directive takes priority over Expires, check it first */ if (rep->cache_control && rep->cache_control->max_age >= 0) rep->expires = squid_curtime + rep->cache_control->max_age; else - /* - * The HTTP/1.0 specs says that robust implementations should consider bad - * or malformed Expires header as equivalent to "expires immediately." - */ + /* + * The HTTP/1.0 specs says that robust implementations should consider bad + * or malformed Expires header as equivalent to "expires immediately." + */ if (rep->expires < 0 && httpHeaderHas(hdr, HDR_EXPIRES)) rep->expires = squid_curtime; } diff --git a/src/String.cc b/src/String.cc index 03824095de..44075c616f 100644 --- a/src/String.cc +++ b/src/String.cc @@ -1,5 +1,6 @@ + /* - * $Id: String.cc,v 1.3 1998/03/08 20:42:00 rousskov Exp $ + * $Id: String.cc,v 1.4 1998/04/06 22:32:10 wessels Exp $ * * DEBUG: section 67 String * AUTHOR: Duane Wessels @@ -31,7 +32,7 @@ #include "squid.h" static void -stringInitBuf(String *s, size_t sz) +stringInitBuf(String * s, size_t sz) { s->buf = memAllocBuf(sz, &sz); assert(sz < 65536); @@ -39,7 +40,7 @@ stringInitBuf(String *s, size_t sz) } void -stringInit(String *s, const char *str) +stringInit(String * s, const char *str) { assert(s); if (str) @@ -49,17 +50,17 @@ stringInit(String *s, const char *str) } void -stringLimitInit(String *s, const char *str, int len) +stringLimitInit(String * s, const char *str, int len) { assert(s && str); - stringInitBuf(s, len+1); + stringInitBuf(s, len + 1); s->len = len; xmemcpy(s->buf, str, len); s->buf[len] = '\0'; } String -stringDup(const String *s) +stringDup(const String * s) { String dup; assert(s); @@ -68,7 +69,7 @@ stringDup(const String *s) } void -stringClean(String *s) +stringClean(String * s) { assert(s); if (s->buf) @@ -77,14 +78,14 @@ stringClean(String *s) } void -stringReset(String *s, const char *str) +stringReset(String * s, const char *str) { stringClean(s); stringInit(s, str); } void -stringAppend(String *s, const char *str, int len) +stringAppend(String * s, const char *str, int len) { assert(s && s->buf); if (s->len + len < s->size) { @@ -95,7 +96,7 @@ stringAppend(String *s, const char *str, int len) snew.len = s->len + len; stringInitBuf(&snew, snew.len + 1); xmemcpy(snew.buf, s->buf, s->len); - xmemcpy(snew.buf+s->len, str, len); + xmemcpy(snew.buf + s->len, str, len); snew.buf[snew.len] = '\0'; stringClean(s); *s = snew; diff --git a/src/access_log.cc b/src/access_log.cc index cd50f2b2bb..82b1c36d89 100644 --- a/src/access_log.cc +++ b/src/access_log.cc @@ -1,7 +1,7 @@ /* - * $Id: access_log.cc,v 1.26 1998/03/31 08:37:29 wessels Exp $ + * $Id: access_log.cc,v 1.27 1998/04/06 22:32:10 wessels Exp $ * * DEBUG: section 46 Access Log * AUTHOR: Duane Wessels @@ -419,7 +419,7 @@ fvdbDumpForw(StoreEntry * e) } static -void +void fvdbFreeEntry(void *data) { fvdb_entry *fv = data; diff --git a/src/acl.cc b/src/acl.cc index b5f5c8651b..bee4f940a1 100644 --- a/src/acl.cc +++ b/src/acl.cc @@ -1,6 +1,6 @@ /* - * $Id: acl.cc,v 1.156 1998/04/04 01:44:00 kostas Exp $ + * $Id: acl.cc,v 1.157 1998/04/06 22:32:11 wessels Exp $ * * DEBUG: section 28 Access Control * AUTHOR: Duane Wessels @@ -744,17 +744,17 @@ aclParseProxyAuth(void *data) static void aclParseSnmpComm(void 
*data)
 {
-    acl_snmp_comm **q=data;
+    acl_snmp_comm **q = data;
     acl_snmp_comm *p;
     char *t;
     t = strtok(NULL, w_space);
     if (t) {
-	p=xcalloc(1, sizeof(acl_snmp_comm));
-	p->name=xstrdup(t);
-	p->community=NULL;
-	*q=p;
+	p = xcalloc(1, sizeof(acl_snmp_comm));
+	p->name = xstrdup(t);
+	p->community = NULL;
+	*q = p;
     }
-    t=strtok(NULL, w_space);
+    t = strtok(NULL, w_space);
     return;
 }

diff --git a/src/client.cc b/src/client.cc
index 01135069a0..268f6c5129 100644
--- a/src/client.cc
+++ b/src/client.cc
@@ -1,10 +1,6 @@
-
-
-
-
 /*
- * $Id: client.cc,v 1.63 1998/04/02 04:45:03 rousskov Exp $
+ * $Id: client.cc,v 1.64 1998/04/06 22:32:12 wessels Exp $
  *
  * DEBUG: section 0     WWW Client
  * AUTHOR: Harvest Derived
@@ -237,7 +233,7 @@ main(int argc, char *argv[])
		char *t;
		strncpy(extra_hdrs, optarg, sizeof(extra_hdrs));
		while ((t = strstr(extra_hdrs, "\\n")))
-		    *t = '\r', *(t+1) = '\n';
+		    *t = '\r', *(t + 1) = '\n';
	    }
	    break;
	case 'v':
diff --git a/src/client_side.cc b/src/client_side.cc
index 8f088bbaf4..63fc280baa 100644
--- a/src/client_side.cc
+++ b/src/client_side.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: client_side.cc,v 1.256 1998/04/05 02:23:52 rousskov Exp $
+ * $Id: client_side.cc,v 1.257 1998/04/06 22:32:13 wessels Exp $
  *
  * DEBUG: section 33    Client-side Routines
  * AUTHOR: Duane Wessels
@@ -1658,7 +1658,7 @@ parseHttpRequest(ConnStateData * conn, method_t * method_p, int *status,
     /* handle internal objects */
     if (*url == '/' && strncmp(url, "/squid-internal/", 16) == 0) {
	/* prepend our name & port */
-	http->uri = xstrdup(urlInternal(NULL, url+16));
+	http->uri = xstrdup(urlInternal(NULL, url + 16));
	http->internal = 1;
     }
     /* see if we running in Config2.Accel.on, if so got to convert it to URL */
diff --git a/src/event.cc b/src/event.cc
index e1bd0008d6..3444155280 100644
--- a/src/event.cc
+++ b/src/event.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: event.cc,v 1.12 1998/04/05 22:29:00 rousskov Exp $
+ * $Id: event.cc,v 1.13 1998/04/06 22:32:14 wessels Exp $
  *
  * DEBUG: section 41    Event Processing
  * AUTHOR: Henrik Nordstrom
@@ -67,7 +67,7 @@ void
 eventAddIsh(const char *name, EVH * func, void *arg, time_t delta_ish)
 {
     if (delta_ish >= 3) {
-	const time_t two_third = (2*delta_ish)/3;
+	const time_t two_third = (2 * delta_ish) / 3;
	delta_ish = two_third + (squid_random() % two_third);
     }
     eventAdd(name, func, arg, delta_ish);
 }
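
The eventAddIsh() hunk just above keeps periodic events from synchronizing: for a nominal
period P >= 3 it schedules the callback somewhere in [2P/3, 4P/3). A standalone
illustration of the same arithmetic, with rand() standing in for squid_random():

#include <stdlib.h>
#include <time.h>

static time_t
add_ish(time_t delta_ish)
{
    if (delta_ish >= 3) {
	const time_t two_third = (2 * delta_ish) / 3;
	/* uniform in [two_third, 2 * two_third - 1] */
	delta_ish = two_third + (rand() % two_third);
    }
    return delta_ish;		/* caller would schedule the event this far ahead */
}

For P = 3600 the effective delay lands between 2400 and 4799 seconds, so hourly jobs
drift apart instead of all firing together.
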
diff --git a/src/main.cc b/src/main.cc
index 955b3e0c6b..4dd57a1849 100644
--- a/src/main.cc
+++ b/src/main.cc
@@ -1,6 +1,6 @@
 /*
- * $Id: main.cc,v 1.243 1998/04/03 22:05:12 rousskov Exp $
+ * $Id: main.cc,v 1.244 1998/04/06 22:32:15 wessels Exp $
  *
  * DEBUG: section 1     Startup and Main Loop
  * AUTHOR: Harvest Derived
@@ -497,7 +497,6 @@ mainInitialize(void)
	pconnInit();
	eventInit();
     }
-    serverConnectionsOpen();
     if (theOutIcpConnection >= 0 && (!Config2.Accel.on || Config.onoff.accel_with_proxy))
	neighbors_open(theOutIcpConnection);
diff --git a/src/protos.h b/src/protos.h
index a811b3761a..f12327b140 100644
--- a/src/protos.h
+++ b/src/protos.h
@@ -514,8 +514,8 @@ extern peer *peerGetSomeParent(request_t *, hier_code *);
 extern void peerSelectInit(void);

 /* peer_digest.c */
-extern void peerDigestValidate(peer *p);
-extern void peerDigestRequest(peer *p);
+extern void peerDigestValidate(peer * p);
+extern void peerDigestRequest(peer * p);

 extern void protoDispatch(int, StoreEntry *, request_t *);
diff --git a/src/snmp_core.cc b/src/snmp_core.cc
index 3d05fb44bb..4d184b915a 100644
--- a/src/snmp_core.cc
+++ b/src/snmp_core.cc
@@ -1,5 +1,5 @@
 /*
- * $Id: snmp_core.cc,v 1.1 1998/04/04 01:44:04 kostas Exp $
+ * $Id: snmp_core.cc,v 1.2 1998/04/06 22:32:18 wessels Exp $
  *
  * DEBUG: section 49    SNMP support
  * AUTHOR: Kostas Anagnostakis
@@ -174,9 +174,9 @@ snmpHandleUdp(int sock, void *not_used)
 }

 void
-snmpAgentParseDone(int errstat, void * data)
+snmpAgentParseDone(int errstat, void *data)
 {
-    snmp_request_t *snmp_rq=(snmp_request_t *)data;
+    snmp_request_t *snmp_rq = (snmp_request_t *) data;
     LOCAL_ARRAY(char, deb_line, 4096);
     int sock = snmp_rq->sock;
     long this_reqid = snmp_rq->reqid;
@@ -185,7 +185,7 @@ snmpAgentParseDone(int errstat, void * data)
     if (memcmp(&snmp_rq->from, &local_snmpd, sizeof(struct sockaddr_in)) == 0) {
	/* look it up */
-	if (snmpFwd_removePending(&snmp_rq->from, this_reqid)) {	/* failed */
+	if (snmpFwd_removePending(&snmp_rq->from, this_reqid)) {	/* failed */
	    debug(49, 2) ("snmp: bogus response from %s.\n",
		inet_ntoa(snmp_rq->from.sin_addr));
	    if (snmp_rq->community)
@@ -507,7 +507,7 @@ snmpConnectionClose(void)
 void
 snmpAgentParse(void *data)
 {
-    snmp_request_t * rq=(snmp_request_t *)data;
+    snmp_request_t *rq = (snmp_request_t *) data;
     u_char *buf = rq->buf;
     int len = rq->len;
@@ -519,22 +519,22 @@ snmpAgentParse(void *data)
     PDU = snmp_pdu_create(0);
     Community = snmp_parse(Session, PDU, buf, len);

-    if (!snmp_coexist_V2toV1(PDU)) {	/* incompatibility */
-	debug(49, 3) ("snmpAgentParse: Incompatible V2 packet.\n");
-	snmp_free_pdu(PDU);
-	snmpAgentParseDone(0, rq);
-	return;
+    if (!snmp_coexist_V2toV1(PDU)) {	/* incompatibility */
+	debug(49, 3) ("snmpAgentParse: Incompatible V2 packet.\n");
+	snmp_free_pdu(PDU);
+	snmpAgentParseDone(0, rq);
+	return;
     }
     rq->community = Community;
     rq->PDU = PDU;
     debug(49, 5) ("snmpAgentParse: reqid=[%d]\n", PDU->reqid);

     if (!Community) {
-	debug(49, 2) ("snmpAgentParse: WARNING: Could not parse community\n");
+	debug(49, 2) ("snmpAgentParse: WARNING: Could not parse community\n");

-	snmp_free_pdu(PDU);
-	snmpAgentParseDone(0, rq);
-	return;
+	snmp_free_pdu(PDU);
+	snmpAgentParseDone(0, rq);
+	return;
     }
     snmpAclCheckStart(rq);
 }
@@ -553,81 +553,81 @@ snmpAgentResponse(struct snmp_pdu *PDU)

     /* Create a response */
     Answer = snmp_pdu_create(SNMP_PDU_RESPONSE);
     if (Answer == NULL)
-	return (NULL);
+	return (NULL);
     Answer->reqid = PDU->reqid;
     Answer->errindex = 0;
     if (PDU->command == SNMP_PDU_GET) {
-	RespVars = &(Answer->variables);
-	/* Loop through all variables */
-	for (VarPtrP = &(PDU->variables);
-	    *VarPtrP;
-	    VarPtrP = &((*VarPtrP)->next_variable)) {
-	    VarPtr = *VarPtrP;
-
-	    index++;
-
-	    /* Find the parsing function for this variable */
-	    ParseFn = oidlist_Find(VarPtr->name, VarPtr->name_length);
-
-	    if (ParseFn == NULL) {
-		Answer->errstat = SNMP_ERR_NOSUCHNAME;
-		debug(49, 5) ("snmpAgentResponse: No such oid. ");
-	    } else
-		VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat));
-
-	    /* Was there an error? */
-	    if ((Answer->errstat != SNMP_ERR_NOERROR) ||
-		(VarNew == NULL)) {
-		Answer->errindex = index;
-		debug(49, 5) ("snmpAgentParse: successful.\n");
-		/* Just copy the rest of the variables. Quickly. */
-		*RespVars = VarPtr;
-		*VarPtrP = NULL;
-		return (Answer);
-	    }
-	    /* No error. Insert this var at the end, and move on to the next.
- */ - *RespVars = VarNew; - RespVars = &(VarNew->next_variable); - } - return (Answer); + RespVars = &(Answer->variables); + /* Loop through all variables */ + for (VarPtrP = &(PDU->variables); + *VarPtrP; + VarPtrP = &((*VarPtrP)->next_variable)) { + VarPtr = *VarPtrP; + + index++; + + /* Find the parsing function for this variable */ + ParseFn = oidlist_Find(VarPtr->name, VarPtr->name_length); + + if (ParseFn == NULL) { + Answer->errstat = SNMP_ERR_NOSUCHNAME; + debug(49, 5) ("snmpAgentResponse: No such oid. "); + } else + VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat)); + + /* Was there an error? */ + if ((Answer->errstat != SNMP_ERR_NOERROR) || + (VarNew == NULL)) { + Answer->errindex = index; + debug(49, 5) ("snmpAgentParse: successful.\n"); + /* Just copy the rest of the variables. Quickly. */ + *RespVars = VarPtr; + *VarPtrP = NULL; + return (Answer); + } + /* No error. Insert this var at the end, and move on to the next. + */ + *RespVars = VarNew; + RespVars = &(VarNew->next_variable); + } + return (Answer); } else if (PDU->command == SNMP_PDU_GETNEXT) { - oid *TmpOidName; - int TmpOidNameLen = 0; - - /* Find the next OID. */ - VarPtr = PDU->variables; - - ParseFn = oidlist_Next(VarPtr->name, VarPtr->name_length, - &(TmpOidName), (snint *) & (TmpOidNameLen)); - - if (ParseFn == NULL) { - Answer->errstat = SNMP_ERR_NOSUCHNAME; - debug(49, 5) ("snmpAgentResponse: No such oid: "); - snmpDebugOid(5, VarPtr->name, VarPtr->name_length); - } else { - xfree(VarPtr->name); - VarPtr->name = TmpOidName; - VarPtr->name_length = TmpOidNameLen; - VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat)); - } - - /* Was there an error? */ - if (Answer->errstat != SNMP_ERR_NOERROR) { - Answer->errindex = 1; - - /* Just copy this variable */ - Answer->variables = VarPtr; - PDU->variables = NULL; - } else { - Answer->variables = VarNew; - } - - /* Done. Return this PDU */ - return (Answer); - } /* end SNMP_PDU_GETNEXT */ + oid *TmpOidName; + int TmpOidNameLen = 0; + + /* Find the next OID. */ + VarPtr = PDU->variables; + + ParseFn = oidlist_Next(VarPtr->name, VarPtr->name_length, + &(TmpOidName), (snint *) & (TmpOidNameLen)); + + if (ParseFn == NULL) { + Answer->errstat = SNMP_ERR_NOSUCHNAME; + debug(49, 5) ("snmpAgentResponse: No such oid: "); + snmpDebugOid(5, VarPtr->name, VarPtr->name_length); + } else { + xfree(VarPtr->name); + VarPtr->name = TmpOidName; + VarPtr->name_length = TmpOidNameLen; + VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat)); + } + + /* Was there an error? */ + if (Answer->errstat != SNMP_ERR_NOERROR) { + Answer->errindex = 1; + + /* Just copy this variable */ + Answer->variables = VarPtr; + PDU->variables = NULL; + } else { + Answer->variables = VarNew; + } + + /* Done. Return this PDU */ + return (Answer); + } /* end SNMP_PDU_GETNEXT */ debug(49, 5) ("snmpAgentResponse: Ignoring PDU %d unknown command\n", PDU->command); snmp_free_pdu(Answer); return (NULL); @@ -640,8 +640,8 @@ snmpDebugOid(int lvl, oid * Name, snint Len) int x; objid[0] = '\0'; for (x = 0; x < Len; x++) { - snprintf(mbuf, 16, ".%u", (unsigned char) Name[x]); - strcat(objid, mbuf); + snprintf(mbuf, 16, ".%u", (unsigned char) Name[x]); + strcat(objid, mbuf); } debug(49, lvl) (" oid = %s\n", objid); } @@ -655,21 +655,21 @@ oidcmp(oid * A, snint ALen, oid * B, snint BLen) /* Compare the first M bytes. 
*/ while (m) { - if (*aptr < *bptr) - return (-1); - if (*aptr++ > *bptr++) - return (1); - m--; + if (*aptr < *bptr) + return (-1); + if (*aptr++ > *bptr++) + return (1); + m--; } /* The first M bytes were identical. So, they share the same * root. The shorter one must come first. */ if (ALen < BLen) - return (-1); + return (-1); if (ALen > BLen) - return (1); + return (1); /* Same length, all bytes identical. Must be the same OID. */ return (0); @@ -685,22 +685,22 @@ oidncmp(oid * A, snint ALen, oid * B, snint BLen, snint CompLen) /* Compare the first M bytes. */ while (count != m) { - if (*aptr < *bptr) - return (-1); - if (*aptr++ > *bptr++) - return (1); - count++; + if (*aptr < *bptr) + return (-1); + if (*aptr++ > *bptr++) + return (1); + count++; } if (m == CompLen) - return (0); + return (0); if (ALen < BLen) - return (-1); + return (-1); if (ALen > BLen) - return (1); + return (1); /* Same length, all bytes identical. Must be the same OID. */ return (0); @@ -715,7 +715,7 @@ oiddup(oid * A, snint ALen) Ans = (oid *) xmalloc(sizeof(oid) * ALen); if (Ans) - memcpy(Ans, A, (sizeof(oid) * ALen)); + memcpy(Ans, A, (sizeof(oid) * ALen)); return (Ans); } @@ -733,25 +733,25 @@ oidlist_Find(oid * Src, snint SrcLen) int ret; debug(49, 7) ("oidlist_Find: Called.\n "); - snmpDebugOid(7, Src, SrcLen); + snmpDebugOid(7, Src, SrcLen); for (Ptr = squidMIBList; Ptr->GetFn; Ptr++) { - ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen); + ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen); - if (!ret) { + if (!ret) { - /* Cool. We found the mib it's in. Let it find the function. - */ - debug(49, 7) ("oidlist_Find: found, returning GetFn Ptr! \n"); + /* Cool. We found the mib it's in. Let it find the function. + */ + debug(49, 7) ("oidlist_Find: found, returning GetFn Ptr! \n"); - return ((*Ptr->GetFn) (Src, SrcLen)); - } - if (ret < 0) { - debug(49, 7) ("oidlist_Find: We just passed it, so it doesn't exist.\n "); - /* We just passed it, so it doesn't exist. */ - return (NULL); - } + return ((*Ptr->GetFn) (Src, SrcLen)); + } + if (ret < 0) { + debug(49, 7) ("oidlist_Find: We just passed it, so it doesn't exist.\n "); + /* We just passed it, so it doesn't exist. */ + return (NULL); + } } debug(49, 5) ("oidlist_Find: the request was past the end. It doesn't exist.\n"); @@ -775,39 +775,39 @@ oidlist_Next(oid * Src, snint SrcLen, oid ** DestP, snint * DestLenP) for (Ptr = squidMIBList; Ptr->GetNextFn; Ptr++) { - /* Only look at as much as we have stored */ - ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen); - - if (!ret) { - debug(49, 6) ("oidlist_Next: Checking MIB\n"); - - /* Cool. We found the mib it's in. Ask it. - */ - while (Ptr != NULL && Ptr->GetNextFn) { - Fn = ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP)); - if (Fn == NULL) { - /* If this returned NULL, we're looking for the first - * in the next MIB. - */ - debug(49, 6) ("oidlist_Next: Not in this entry. Trying next.\n"); - Ptr++; - continue; - } - return Fn; - } - /* Return what we found. NULL if it wasn't in the MIB, and there - * were no more MIBs. - */ - debug(49, 3) ("oidlist_Next: No next mib.\n"); - return NULL; - } - if (ret < 0) { - /* We just passed the mib it would be in. Return - * the next in this MIB. - */ - debug(49, 3) ("oidlist_Next: Passed mib. 
Checking this one.\n"); - return ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP)); - } + /* Only look at as much as we have stored */ + ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen); + + if (!ret) { + debug(49, 6) ("oidlist_Next: Checking MIB\n"); + + /* Cool. We found the mib it's in. Ask it. + */ + while (Ptr != NULL && Ptr->GetNextFn) { + Fn = ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP)); + if (Fn == NULL) { + /* If this returned NULL, we're looking for the first + * in the next MIB. + */ + debug(49, 6) ("oidlist_Next: Not in this entry. Trying next.\n"); + Ptr++; + continue; + } + return Fn; + } + /* Return what we found. NULL if it wasn't in the MIB, and there + * were no more MIBs. + */ + debug(49, 3) ("oidlist_Next: No next mib.\n"); + return NULL; + } + if (ret < 0) { + /* We just passed the mib it would be in. Return + * the next in this MIB. + */ + debug(49, 3) ("oidlist_Next: Passed mib. Checking this one.\n"); + return ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP)); + } } /* We get here if the request was past the end. It doesn't exist. */ @@ -827,18 +827,17 @@ gen_getMax() return &maddr; } -int +int fd_getMax() { fde *f; int cnt = 0, num = 0; while (cnt < Squid_MaxFD) { - f = &fd_table[cnt++]; - if (!f->open) - continue; - if (f->type != FD_SOCKET) - num++; + f = &fd_table[cnt++]; + if (!f->open) + continue; + if (f->type != FD_SOCKET) + num++; } return num; } - diff --git a/src/stat.cc b/src/stat.cc index 487d42ce6f..16081e8ee9 100644 --- a/src/stat.cc +++ b/src/stat.cc @@ -1,6 +1,6 @@ /* - * $Id: stat.cc,v 1.223 1998/04/06 22:00:29 rousskov Exp $ + * $Id: stat.cc,v 1.224 1998/04/06 22:32:20 wessels Exp $ * * DEBUG: section 18 Cache Manager Statistics * AUTHOR: Harvest Derived @@ -1032,7 +1032,7 @@ statMedianSvc(int interval, int which) int get_median_svc(int interval, int which) { - return(int) statMedianSvc(interval, which); + return (int) statMedianSvc(interval, which); } StatCounters * diff --git a/src/store_digest.cc b/src/store_digest.cc index 01e5ce48b9..d849f82191 100644 --- a/src/store_digest.cc +++ b/src/store_digest.cc @@ -1,5 +1,5 @@ /* - * $Id: store_digest.cc,v 1.2 1998/04/03 22:05:14 rousskov Exp $ + * $Id: store_digest.cc,v 1.3 1998/04/06 22:32:21 wessels Exp $ * * DEBUG: section 71 Store Digest Manager * AUTHOR: Alex Rousskov @@ -39,13 +39,13 @@ typedef struct { typedef struct { StoreDigestCBlock cblock; - int rebuild_lock; /* bucket number */ - StoreEntry *rewrite_lock; /* store entry with the digest */ - const char *other_lock; /* used buy external modules to pause rebuilds and rewrites */ + int rebuild_lock; /* bucket number */ + StoreEntry *rewrite_lock; /* store entry with the digest */ + const char *other_lock; /* used buy external modules to pause rebuilds and rewrites */ int rebuild_offset; int rewrite_offset; int rebuild_count; - int rewrite_count; + int rewrite_count; } StoreDigestState; /* @@ -55,9 +55,9 @@ typedef struct { /* fake url suffix */ static const char *StoreDigestUrl = "cache_digest"; /* how often we want to rebuild the digest, seconds */ -static const time_t StoreDigestRebuildPeriod = 60*60; +static const time_t StoreDigestRebuildPeriod = 60 * 60; /* how often we want to rewrite the digest, seconds */ -static const time_t StoreDigestRewritePeriod = 60*60; +static const time_t StoreDigestRewritePeriod = 60 * 60; /* how many bytes to swap out at a time */ static const int StoreDigestSwapOutChunkSize = SM_PAGE_SIZE; /* portion (0,1] of a hash table to be rescanned at a time */ @@ -71,9 +71,9 @@ static void 
storeDigestRebuild(void *datanotused); static void storeDigestRebuildFinish(); static void storeDigestRebuildStep(void *datanotused); static void storeDigestRewrite(); -static void storeDigestRewriteFinish(StoreEntry *e); -static void storeDigestSwapOutStep(StoreEntry *e); -static void storeDigestCBlockSwapOut(StoreEntry *e); +static void storeDigestRewriteFinish(StoreEntry * e); +static void storeDigestSwapOutStep(StoreEntry * e); +static void storeDigestCBlockSwapOut(StoreEntry * e); void @@ -86,7 +86,7 @@ storeDigestInit() * Use 1.5*max#entries because 2*max#entries gives about 40% utilization. */ #if SQUID_MAINTAIN_CACHE_DIGEST - const int cap = (int)(1.5 * Config.Swap.maxSize / Config.Store.avgObjectSize); + const int cap = (int) (1.5 * Config.Swap.maxSize / Config.Store.avgObjectSize); store_digest = cacheDigestCreate(cap); debug(71, 1) ("Using %d byte cache digest; rebuild/rewrite every %d/%d sec\n", store_digest->mask_size, StoreDigestRebuildPeriod, StoreDigestRewritePeriod); @@ -96,7 +96,7 @@ storeDigestInit() #endif memset(&sd_state, 0, sizeof(sd_state)); cachemgrRegister("store_digest", "Store Digest", - storeDigestReport, 0); + storeDigestReport, 0); } /* you probably want to call this before storeDigestRewriteContinue() */ @@ -108,7 +108,8 @@ storeDigestScheduleRebuild() /* externally initiated rewrite (inits store entry and pauses) */ void -storeDigestRewriteStart(const char *initiator) { +storeDigestRewriteStart(const char *initiator) +{ assert(initiator); assert(!sd_state.other_lock); sd_state.other_lock = initiator; @@ -117,7 +118,8 @@ storeDigestRewriteStart(const char *initiator) { /* continue externally initiated rewrite */ void -storeDigestRewriteContinue(const char *initiator) { +storeDigestRewriteContinue(const char *initiator) +{ assert(initiator); assert(!strcmp(sd_state.other_lock, initiator)); assert(sd_state.rewrite_lock); @@ -139,7 +141,7 @@ storeDigestRebuild(void *datanotused) sd_state.rebuild_offset = 0; /* not clean()! 
*/ cacheDigestClear(store_digest); - debug(71, 2) ("storeDigestRebuild: start rebuild #%d\n", sd_state.rebuild_count+1); + debug(71, 2) ("storeDigestRebuild: start rebuild #%d\n", sd_state.rebuild_count + 1); storeDigestRebuildStep(NULL); } @@ -161,7 +163,7 @@ storeDigestRebuildFinish() static void storeDigestRebuildStep(void *datanotused) { - int bcount = (int)ceil(store_hash_buckets*StoreDigestRebuildChunkPercent); + int bcount = (int) ceil(store_hash_buckets * StoreDigestRebuildChunkPercent); assert(sd_state.rebuild_lock); if (sd_state.rebuild_offset + bcount > store_hash_buckets) bcount = store_hash_buckets - sd_state.rebuild_offset; @@ -198,7 +200,7 @@ storeDigestRewrite(void *datanotused) debug(71, 1) ("storeDigestRewrite: overlap detected, consider increasing rewrite period\n"); return; } - debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count+1); + debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count + 1); /* make new store entry */ snprintf(url, sizeof(url), "http://%s:%d/squid-internal/%s", getMyHostname(), Config.Port.http->i, StoreDigestUrl); @@ -214,7 +216,7 @@ storeDigestRewrite(void *datanotused) e->mem_obj->request = requestLink(urlParse(METHOD_GET, url)); httpReplyReset(e->mem_obj->reply); httpReplySetHeaders(e->mem_obj->reply, 1.0, 200, "Cache Digest OK", - "application/cache-digest", store_digest->mask_size+sizeof(sd_state.cblock), + "application/cache-digest", store_digest->mask_size + sizeof(sd_state.cblock), squid_curtime, squid_curtime + StoreDigestRewritePeriod); storeBuffer(e); httpReplySwapOut(e->mem_obj->reply, e); @@ -229,7 +231,7 @@ storeDigestRewrite(void *datanotused) /* finishes swap out sequence for the digest; schedules next rewrite */ static void -storeDigestRewriteFinish(StoreEntry *e) +storeDigestRewriteFinish(StoreEntry * e) { assert(e); assert(e == sd_state.rewrite_lock); @@ -244,7 +246,7 @@ storeDigestRewriteFinish(StoreEntry *e) /* swaps out one digest "chunk" per invocation; schedules next swap out */ static void -storeDigestSwapOutStep(StoreEntry *e) +storeDigestSwapOutStep(StoreEntry * e) { int chunk_size = StoreDigestSwapOutChunkSize; assert(e); @@ -266,22 +268,22 @@ storeDigestSwapOutStep(StoreEntry *e) if (sd_state.rewrite_offset >= store_digest->mask_size) storeDigestRewriteFinish(e); else - eventAdd("storeDigestSwapOutStep", (EVH*) storeDigestSwapOutStep, e, 0); + eventAdd("storeDigestSwapOutStep", (EVH *) storeDigestSwapOutStep, e, 0); } static void -storeDigestCBlockSwapOut(StoreEntry *e) +storeDigestCBlockSwapOut(StoreEntry * e) { /* * when we actually start using control block, do not forget to convert to * network byte order if needed */ memset(&sd_state.cblock, 0, sizeof(sd_state.cblock)); - storeAppend(e, (char*) &sd_state.cblock, sizeof(sd_state.cblock)); + storeAppend(e, (char *) &sd_state.cblock, sizeof(sd_state.cblock)); } void -storeDigestReport(StoreEntry *e) +storeDigestReport(StoreEntry * e) { if (store_digest) { cacheDigestReport(store_digest, "store", e); @@ -289,4 +291,3 @@ storeDigestReport(StoreEntry *e) storeAppendPrintf(e, "store digest: disabled.\n"); } } - diff --git a/src/store_rebuild.cc b/src/store_rebuild.cc index 9ae0dab87e..5b00e0c1da 100644 --- a/src/store_rebuild.cc +++ b/src/store_rebuild.cc @@ -1,5 +1,5 @@ /* - * $Id: store_rebuild.cc,v 1.29 1998/04/04 04:50:32 wessels Exp $ + * $Id: store_rebuild.cc,v 1.30 1998/04/06 22:32:21 wessels Exp $ * * DEBUG: section 20 Store Rebuild Routines * AUTHOR: Duane Wessels @@ -560,7 +560,7 @@ storeAddDiskRestore(const 
cache_key * key, e->ping_status = PING_NONE; EBIT_CLR(e->flag, ENTRY_VALIDATED); storeDirMapBitSet(e->swap_file_number); - storeHashInsert(e, key); /* do it after we clear KEY_PRIVATE */ + storeHashInsert(e, key); /* do it after we clear KEY_PRIVATE */ return e; } diff --git a/src/structs.h b/src/structs.h index 28e0eb43f3..1565aaf1ac 100644 --- a/src/structs.h +++ b/src/structs.h @@ -1,6 +1,4 @@ - - struct _acl_ip_data { struct in_addr addr1; /* if addr2 non-zero then its a range */ struct in_addr addr2; diff --git a/src/test_cache_digest.cc b/src/test_cache_digest.cc index af43bb6eba..b18899c9ff 100644 --- a/src/test_cache_digest.cc +++ b/src/test_cache_digest.cc @@ -1,6 +1,6 @@ /* - * $Id: test_cache_digest.cc,v 1.16 1998/04/02 17:11:28 rousskov Exp $ + * $Id: test_cache_digest.cc,v 1.17 1998/04/06 22:32:22 wessels Exp $ * * AUTHOR: Alex Rousskov * @@ -50,7 +50,7 @@ struct _Cache { Cache *peer; CacheQueryStats qstats; int count; /* #currently cached entries */ - int req_count; /* #requests to this cache */ + int req_count; /* #requests to this cache */ int bad_add_count; /* #duplicate adds */ int bad_del_count; /* #dels with no prior add */ }; @@ -67,27 +67,29 @@ typedef struct _CacheEntry { typedef struct { cache_key key[MD5_DIGEST_CHARS]; time_t timestamp; - short int use_icp; /* true/false */ + short int use_icp; /* true/false */ } RawAccessLogEntry; -typedef enum { frError = -2, frMore = -1, frEof = 0, frOk = 1 } fr_result; +typedef enum { + frError = -2, frMore = -1, frEof = 0, frOk = 1 +} fr_result; typedef struct _FileIterator FileIterator; -typedef fr_result (*FI_READER)(FileIterator *fi); +typedef fr_result(*FI_READER) (FileIterator * fi); struct _FileIterator { const char *fname; FILE *file; - time_t inner_time; /* timestamp of the current entry */ - time_t time_offset; /* to adjust time set by reader */ - int line_count; /* number of lines scanned */ - int bad_line_count; /* number of parsing errors */ - int time_warp_count;/* number of out-of-order entries in the file */ - FI_READER reader; /* reads next entry and updates inner_time */ - void *entry; /* buffer for the current entry, freed with xfree() */ + time_t inner_time; /* timestamp of the current entry */ + time_t time_offset; /* to adjust time set by reader */ + int line_count; /* number of lines scanned */ + int bad_line_count; /* number of parsing errors */ + int time_warp_count; /* number of out-of-order entries in the file */ + FI_READER reader; /* reads next entry and updates inner_time */ + void *entry; /* buffer for the current entry, freed with xfree() */ }; /* globals */ -static time_t cur_time = -1; /* timestamp of the current log entry */ +static time_t cur_time = -1; /* timestamp of the current log entry */ #if 0 @@ -132,7 +134,7 @@ methodStrToId(const char *s) /* FileIterator */ -static void fileIteratorAdvance(FileIterator *fi); +static void fileIteratorAdvance(FileIterator * fi); static FileIterator * fileIteratorCreate(const char *fname, FI_READER reader) @@ -152,7 +154,7 @@ fileIteratorCreate(const char *fname, FI_READER reader) } static void -fileIteratorDestroy(FileIterator *fi) +fileIteratorDestroy(FileIterator * fi) { assert(fi); if (fi->file) { @@ -164,7 +166,7 @@ fileIteratorDestroy(FileIterator *fi) } static void -fileIteratorSetCurTime(FileIterator *fi, time_t ct) +fileIteratorSetCurTime(FileIterator * fi, time_t ct) { assert(fi); assert(fi->inner_time > 0); @@ -172,7 +174,7 @@ fileIteratorSetCurTime(FileIterator *fi, time_t ct) } static void -fileIteratorAdvance(FileIterator *fi) 
@@ -132,7 +134,7 @@ methodStrToId(const char *s)

 /* FileIterator */

-static void fileIteratorAdvance(FileIterator *fi);
+static void fileIteratorAdvance(FileIterator * fi);

 static FileIterator *
 fileIteratorCreate(const char *fname, FI_READER reader)
@@ -152,7 +154,7 @@ fileIteratorCreate(const char *fname, FI_READER reader)
 }

 static void
-fileIteratorDestroy(FileIterator *fi)
+fileIteratorDestroy(FileIterator * fi)
 {
     assert(fi);
     if (fi->file) {
@@ -164,7 +166,7 @@ fileIteratorDestroy(FileIterator *fi)
 }

 static void
-fileIteratorSetCurTime(FileIterator *fi, time_t ct)
+fileIteratorSetCurTime(FileIterator * fi, time_t ct)
 {
     assert(fi);
     assert(fi->inner_time > 0);
@@ -172,7 +174,7 @@ fileIteratorSetCurTime(FileIterator *fi, time_t ct)
 }

 static void
-fileIteratorAdvance(FileIterator *fi)
+fileIteratorAdvance(FileIterator * fi)
 {
     int res;
     assert(fi);
@@ -185,15 +187,13 @@ fileIteratorAdvance(FileIterator *fi)
         fi->inner_time = last_time;
     else
         fi->inner_time += fi->time_offset;
-    if (res == frError)
+    if (res == frError)
         fi->bad_line_count++;
-    else
-    if (res == frEof) {
+    else if (res == frEof) {
         fprintf(stderr, "exhausted %s (%d entries) at %s",
             fi->fname, fi->line_count, ctime(&fi->inner_time));
         fi->inner_time = -1;
-    } else
-    if (fi->inner_time < last_time) {
+    } else if (fi->inner_time < last_time) {
         assert(last_time >= 0);
         fi->time_warp_count++;
         fi->inner_time = last_time;
@@ -249,11 +249,11 @@ cacheDestroy(Cache * cache)
     hash = cache->hash;
     /* destroy hash table contents */
     for (e = hash_first(hash); e; e = hash_next(hash)) {
-        hash_remove_link(hash, (hash_link*)e);
+        hash_remove_link(hash, (hash_link *) e);
         cacheEntryDestroy(e);
     }
     /* destroy the hash table itself */
-    hashFreeMemory(hash);
+    hashFreeMemory(hash);
     if (cache->digest)
         cacheDigestDestroy(cache->digest);
     xfree(cache);
@@ -272,7 +272,7 @@ cacheResetDigest(Cache * cache)
     if (cache->digest)
         cacheDigestDestroy(cache->digest);
     hash = cache->hash;
-    cache->digest = cacheDigestCreate(2 * cache->count + 1);  /* 50% utilization */
+    cache->digest = cacheDigestCreate(2 * cache->count + 1);   /* 50% utilization */
     if (!cache->count)
         return;
     gettimeofday(&t_start, NULL);
@@ -319,21 +319,21 @@ cacheQueryPeer(Cache * cache, const cache_key * key)
 }

 static void
-cacheQueryReport(Cache * cache, CacheQueryStats *stats)
+cacheQueryReport(Cache * cache, CacheQueryStats * stats)
 {
-    fprintf(stdout, "%s: peer queries: %d (%d%%)\n",
-        cache->name,
+    fprintf(stdout, "%s: peer queries: %d (%d%%)\n",
+        cache->name,
         stats->query_count, xpercentInt(stats->query_count, cache->req_count)
         );
     fprintf(stdout, "%s: t-hit: %d (%d%%) t-miss: %d (%d%%) t-*: %d (%d%%)\n",
-        cache->name,
+        cache->name,
         stats->true_hit_count, xpercentInt(stats->true_hit_count, stats->query_count),
         stats->true_miss_count, xpercentInt(stats->true_miss_count, stats->query_count),
         stats->true_hit_count + stats->true_miss_count,
         xpercentInt(stats->true_hit_count + stats->true_miss_count, stats->query_count)
         );
     fprintf(stdout, "%s: f-hit: %d (%d%%) f-miss: %d (%d%%) f-*: %d (%d%%)\n",
-        cache->name,
+        cache->name,
         stats->false_hit_count, xpercentInt(stats->false_hit_count, stats->query_count),
         stats->false_miss_count, xpercentInt(stats->false_miss_count, stats->query_count),
         stats->false_hit_count + stats->false_miss_count,
@@ -344,14 +344,14 @@
 static void
 cacheReport(Cache * cache)
 {
-    fprintf(stdout, "%s: entries: %d reqs: %d bad-add: %d bad-del: %d\n",
+    fprintf(stdout, "%s: entries: %d reqs: %d bad-add: %d bad-del: %d\n",
         cache->name, cache->count, cache->req_count,
         cache->bad_add_count, cache->bad_del_count);
 }

 static void
-cacheFetch(Cache *cache, const RawAccessLogEntry *e)
+cacheFetch(Cache * cache, const RawAccessLogEntry * e)
 {
     assert(e);
     cache->req_count++;
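A note on the sizing in cacheResetDigest() above: a capacity of 2 * cache->count + 1 deliberately leaves the digest about half empty, and a sparser bit array means fewer false hits when a peer tests keys against it. Back-of-the-envelope cost, taking 4 bits per entry as an illustrative default rather than something this hunk defines:

    #include <stdio.h>

    int
    main(void)
    {
        const int count = 100000;               /* peer's cached objects */
        const int capacity = 2 * count + 1;     /* 50% utilization, as above */
        const int bits_per_entry = 4;           /* assumed, illustrative */
        const int mask_bytes = (capacity * bits_per_entry + 7) / 8;
        printf("digest mask: %d bytes\n", mask_bytes);  /* prints 100001 */
        return 0;
    }

So a peer holding 100,000 objects costs roughly 100 KB per digest exchange under these assumptions.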
@@ -393,8 +393,8 @@ accessLogReader(FileIterator * fi)
     memset(fi->entry, 0, sizeof(RawAccessLogEntry));
     entry = fi->entry;
     if (!fgets(buf, sizeof(buf), fi->file))
-        return frEof;  /* eof */
-    entry->timestamp = fi->inner_time = (time_t)atoi(buf);
+        return frEof;           /* eof */
+    entry->timestamp = fi->inner_time = (time_t) atoi(buf);
     url = strstr(buf, "://");
     hier = url ? strstr(url, " - ") : NULL;
@@ -416,35 +416,36 @@
      * fname, scanned_count, method, buf); */
         return frError;
     }
-    while (*url) url--;
+    while (*url)
+        url--;
     url++;
     *hier = '\0';
     hier += 3;
     *strchr(hier, '/') = '\0';
     /*fprintf(stdout, "%s:%d: %s %s %s\n",
      * fname, count, method, url, hier); */
-    entry->use_icp = strcmp(hier, "NONE");
-    /* no ICP lookup for these status codes */
-/* strcmp(hier, "NONE") &&
-   strcmp(hier, "DIRECT") &&
-   strcmp(hier, "FIREWALL_IP_DIRECT") &&
-   strcmp(hier, "LOCAL_IP_DIRECT") &&
-   strcmp(hier, "NO_DIRECT_FAIL") &&
-   strcmp(hier, "NO_PARENT_DIRECT") &&
-   strcmp(hier, "SINGLE_PARENT") &&
-   strcmp(hier, "PASSTHROUGH_PARENT") &&
-   strcmp(hier, "SSL_PARENT_MISS") &&
-   strcmp(hier, "DEFAULT_PARENT");
-*/
+    entry->use_icp = strcmp(hier, "NONE");
+    /* no ICP lookup for these status codes */
+/* strcmp(hier, "NONE") &&
+ * strcmp(hier, "DIRECT") &&
+ * strcmp(hier, "FIREWALL_IP_DIRECT") &&
+ * strcmp(hier, "LOCAL_IP_DIRECT") &&
+ * strcmp(hier, "NO_DIRECT_FAIL") &&
+ * strcmp(hier, "NO_PARENT_DIRECT") &&
+ * strcmp(hier, "SINGLE_PARENT") &&
+ * strcmp(hier, "PASSTHROUGH_PARENT") &&
+ * strcmp(hier, "SSL_PARENT_MISS") &&
+ * strcmp(hier, "DEFAULT_PARENT");
+ */
     memcpy(entry->key, storeKeyPublic(url, method_id), sizeof(entry->key));
     /*fprintf(stdout, "%s:%d: %s %s %s %s\n",
-        fname, count, method, storeKeyText(entry->key), url, hier); */
+     * fname, count, method, storeKeyText(entry->key), url, hier); */
     return frOk;
 }

 static void
-cachePurge(Cache *cache, storeSwapLogData *s, int update_digest)
+cachePurge(Cache * cache, storeSwapLogData * s, int update_digest)
 {
     CacheEntry *olde = (CacheEntry *) hash_lookup(cache->hash, s->key);
     if (!olde) {
@@ -460,7 +461,7 @@ cachePurge(Cache *cache, storeSwapLogData *s, int update_digest)
 }

 static void
-cacheStore(Cache *cache, storeSwapLogData *s, int update_digest)
+cacheStore(Cache * cache, storeSwapLogData * s, int update_digest)
 {
     CacheEntry *olde = (CacheEntry *) hash_lookup(cache->hash, s->key);
     if (olde) {
@@ -475,17 +476,17 @@ cacheStore(Cache *cache, storeSwapLogData *s, int update_digest)

 static void
-cacheUpdateStore(Cache *cache, storeSwapLogData *s, int update_digest)
+cacheUpdateStore(Cache * cache, storeSwapLogData * s, int update_digest)
 {
     switch (s->op) {
-        case SWAP_LOG_ADD:
-            cacheStore(cache, s, update_digest);
-            break;
-        case SWAP_LOG_DEL:
-            cachePurge(cache, s, update_digest);
-            break;
-        default:
-            assert(0);
+    case SWAP_LOG_ADD:
+        cacheStore(cache, s, update_digest);
+        break;
+    case SWAP_LOG_DEL:
+        cachePurge(cache, s, update_digest);
+        break;
+    default:
+        assert(0);
     }
 }
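cacheUpdateStore() above is the single entry point for replaying a swap log: SWAP_LOG_ADD goes through cacheStore(), SWAP_LOG_DEL through cachePurge(), and the update_digest flag chooses between maintaining the digest per entry and leaving it untouched. main() uses the latter while reading the start-up prefix and then builds the digest in one pass via cacheResetDigest(), which is cheaper than per-entry updates. A hypothetical driver showing the shape of that usage (exampleReplay is not in the source):

    static void
    exampleReplay(Cache * cache, FileIterator * fi, int update_digest)
    {
        /* consume swap log entries until this stream is exhausted */
        while (fi->inner_time > 0) {
            cacheUpdateStore(cache, fi->entry, update_digest);
            fileIteratorAdvance(fi);
        }
    }

    /* exampleReplay(them, fis[1], 0);  -- fill the hash now, digest later */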
@@ -501,7 +502,7 @@ int
 main(int argc, char *argv[])
 {
     FileIterator **fis = NULL;
-    const int fi_count = argc-1;
+    const int fi_count = argc - 1;
     int active_fi_count = 0;
     time_t ready_time;
     Cache *them, *us;
@@ -519,19 +520,20 @@ main(int argc, char *argv[])
     /* init iterators with files */
     fis[0] = fileIteratorCreate(argv[1], accessLogReader);
     for (i = 2; i < argc; ++i)
-        fis[i-1] = fileIteratorCreate(argv[i], swapStateReader);
+        fis[i - 1] = fileIteratorCreate(argv[i], swapStateReader);
     /* check that all files were found */
     for (i = 0; i < fi_count; ++i)
-        if (!fis[i]) return -2;
+        if (!fis[i])
+            return -2;
     /* read prefix to get start-up contents of the peer cache */
     ready_time = -1;
     for (i = 1; i < fi_count; ++i) {
         FileIterator *fi = fis[i];
         while (fi->inner_time > 0) {
-            if (((storeSwapLogData*)fi->entry)->op == SWAP_LOG_DEL) {
+            if (((storeSwapLogData *) fi->entry)->op == SWAP_LOG_DEL) {
                 cachePurge(them, fi->entry, 0);
                 if (ready_time < 0)
-                    ready_time = fi->inner_time;
+                    ready_time = fi->inner_time;
             } else {
                 if (ready_time > 0 && fi->inner_time > ready_time)
                     break;
@@ -542,7 +544,7 @@ main(int argc, char *argv[])
     }
     /* digest peer cache content */
     cacheResetDigest(them);
-    us->digest = cacheDigestClone(them->digest);  /* @netw@ */
+    us->digest = cacheDigestClone(them->digest);        /* @netw@ */

     /* shift the time in access log to match ready_time */
     fileIteratorSetCurTime(fis[0], ready_time);
@@ -550,12 +552,12 @@ main(int argc, char *argv[])
     /* iterate, use the iterator with the smallest positive inner_time */
     cur_time = -1;
     do {
-        int next_i = -1;
-        time_t next_time = -1;
+        int next_i = -1;
+        time_t next_time = -1;
         active_fi_count = 0;
         for (i = 0; i < fi_count; ++i) {
             if (fis[i]->inner_time >= 0) {
-                if (!active_fi_count || fis[i]->inner_time < next_time) {
+                if (!active_fi_count || fis[i]->inner_time < next_time) {
                     next_i = i;
                     next_time = fis[i]->inner_time;
                 }
@@ -564,7 +566,7 @@ main(int argc, char *argv[])
         }
         if (next_i >= 0) {
             cur_time = next_time;
-            /*fprintf(stderr, "%2d time: %d %s", next_i, (int)cur_time, ctime(&cur_time));*/
+            /*fprintf(stderr, "%2d time: %d %s", next_i, (int)cur_time, ctime(&cur_time)); */
             if (next_i == 0)
                 cacheFetch(us, fis[next_i]->entry);
             else
@@ -579,7 +581,7 @@ main(int argc, char *argv[])
     cacheQueryReport(us, &us->qstats);

     /* clean */
-    for (i = 0; i < argc-1; ++i) {
+    for (i = 0; i < argc - 1; ++i) {
         fileIteratorDestroy(fis[i]);
     }
     xfree(fis);
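The do-loop in main() above is a k-way merge: each pass scans the iterators and consumes the entry with the smallest non-negative inner_time, so access-log fetches and swap-log updates replay in global timestamp order no matter which file they came from. The selection step, reduced to a stand-alone sketch (examplePickNext is hypothetical; the real loop additionally tracks active_fi_count to decide when to stop):

    static int
    examplePickNext(FileIterator ** fis, int fi_count)
    {
        int i;
        int next_i = -1;
        time_t next_time = -1;
        for (i = 0; i < fi_count; ++i) {
            if (fis[i]->inner_time < 0)
                continue;       /* stream exhausted */
            if (next_i < 0 || fis[i]->inner_time < next_time) {
                next_i = i;     /* earliest entry so far */
                next_time = fis[i]->inner_time;
            }
        }
        return next_i;          /* -1 once every stream is done */
    }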