/*
- * $Id: CacheDigest.cc,v 1.9 1998/04/04 07:47:53 rousskov Exp $
+ * $Id: CacheDigest.cc,v 1.10 1998/04/06 22:32:05 wessels Exp $
*
* DEBUG: section 70 Cache Digest
* AUTHOR: Alex Rousskov
/* local types */
typedef struct {
- int bit_count; /* total number of bits */
- int bit_on_count; /* #bits turned on */
- int bseq_len_sum; /* sum of all bit seq length */
- int bseq_count; /* number of bit seqs */
+ int bit_count; /* total number of bits */
+ int bit_on_count; /* #bits turned on */
+ int bseq_len_sum; /* sum of all bit seq length */
+ int bseq_count; /* number of bit seqs */
} CacheDigestStats;
/* local functions */
-static void cacheDigestHashKey(int bit_count, const cache_key *key);
+static void cacheDigestHashKey(int bit_count, const cache_key * key);
/* configuration params */
static const int BitsPerEntry = 4;
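/*
 * Illustrative only (not part of this change): with BitsPerEntry = 4,
 * a digest sized for 1,000,000 entries holds 4,000,000 bits, i.e. a
 * 500,000 byte mask; that is the "size: %d bytes" value that
 * cacheDigestReport() derives below as stats.bit_count / 8.
 */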
/* returns mask utilization parameters */
static void
-cacheDigestStats(const CacheDigest * cd, CacheDigestStats *stats)
+cacheDigestStats(const CacheDigest * cd, CacheDigestStats * stats)
{
const int bit_count = cd->capacity * BitsPerEntry;
int on_count = 0;
}
void
-cacheDigestReport(CacheDigest *cd, const char *label, StoreEntry * e)
+cacheDigestReport(CacheDigest * cd, const char *label, StoreEntry * e)
{
CacheDigestStats stats;
assert(cd && e);
cacheDigestStats(cd, &stats);
storeAppendPrintf(e, "%s digest: size: %d bytes\n",
- label ? label : "", stats.bit_count/8
- );
+ label ? label : "", stats.bit_count / 8
+ );
storeAppendPrintf(e, "\t entries: count: %d capacity: %d util: %d%%\n",
cd->count,
cd->capacity,
xpercentInt(cd->count, cd->capacity)
- );
- storeAppendPrintf(e, "\t deletion attempts: %d\n",
+ );
+ storeAppendPrintf(e, "\t deletion attempts: %d\n",
cd->del_count
- );
- storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n",
+ );
+ storeAppendPrintf(e, "\t bits: on: %d capacity: %d util: %d%%\n",
stats.bit_on_count, stats.bit_count,
xpercentInt(stats.bit_on_count, stats.bit_count)
- );
- storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n",
+ );
+ storeAppendPrintf(e, "\t bit-seq: count: %d avg.len: %.2f\n",
stats.bseq_count,
xdiv(stats.bseq_len_sum, stats.bseq_count)
- );
+ );
}
static void
-cacheDigestHashKey(int bit_count, const cache_key *key)
+cacheDigestHashKey(int bit_count, const cache_key * key)
{
/* get four hashed values */
memcpy(hashed_keys, key, sizeof(hashed_keys));
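/*
 * Illustrative only: the 16-byte MD5 store key is reused verbatim as
 * four 32-bit hash values; presumably each value is then reduced
 * modulo bit_count to select one bit position in the mask, so each
 * entry is represented by four bits.
 */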
/*
- * $Id: HttpHdrContRange.cc,v 1.3 1998/03/11 22:18:44 rousskov Exp $
+ * $Id: HttpHdrContRange.cc,v 1.4 1998/04/06 22:32:06 wessels Exp $
*
* DEBUG: section 68 HTTP Content-Range Header
* AUTHOR: Alex Rousskov
#include "squid.h"
-#if 0
- Currently only byte ranges are supported
-
- Content-Range = "Content-Range" ":" content-range-spec
- content-range-spec = byte-content-range-spec
- byte-content-range-spec = bytes-unit SP
- ( byte-range-resp-spec | "*") "/"
- ( entity-length | "*" )
- byte-range-resp-spec = first-byte-pos "-" last-byte-pos
- entity-length = 1*DIGIT
-#endif
+/*
+ * Currently only byte ranges are supported
+ *
+ * Content-Range = "Content-Range" ":" content-range-spec
+ * content-range-spec = byte-content-range-spec
+ * byte-content-range-spec = bytes-unit SP
+ * ( byte-range-resp-spec | "*") "/"
+ * ( entity-length | "*" )
+ * byte-range-resp-spec = first-byte-pos "-" last-byte-pos
+ * entity-length = 1*DIGIT
+ */
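/*
 * Illustrative only: sample fields and the values that
 * httpHdrContRangeParseInit() below should produce for them (a "*" in
 * either position maps to range_spec_unknown):
 *
 *   "bytes 0-499/1234"   ->  spec.offset = 0,   spec.length = 500, elength = 1234
 *   "bytes 500-999/1234" ->  spec.offset = 500, spec.length = 500, elength = 1234
 */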
/* local constants */
/* parses range-resp-spec and inits spec, returns true on success */
static int
-httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec *spec, const char *field, int flen)
+httpHdrRangeRespSpecParseInit(HttpHdrRangeSpec * spec, const char *field, int flen)
{
const char *p;
assert(spec);
if (*field == '*')
return 1;
/* check format, must be %d-%d */
- if (!((p = strchr(field, '-')) && (p-field < flen))) {
+ if (!((p = strchr(field, '-')) && (p - field < flen))) {
debug(68, 2) ("invalid (no '-') resp-range-spec near: '%s'\n", field);
return 0;
}
/* parse offset */
if (!httpHeaderParseSize(field, &spec->offset))
- return 0;
+ return 0;
p++;
/* do we have last-pos ? */
if (p - field < flen) {
size_t last_pos;
if (!httpHeaderParseSize(p, &last_pos))
return 0;
- spec->length = size_diff(last_pos+1, spec->offset);
+ spec->length = size_diff(last_pos + 1, spec->offset);
}
    /* we managed to parse, check if the result makes sense */
if (known_spec(spec->length) && !spec->length) {
}
static void
-httpHdrRangeRespSpecPackInto(const HttpHdrRangeSpec *spec, Packer *p)
+httpHdrRangeRespSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p)
{
if (!known_spec(spec->offset) || !known_spec(spec->length))
packerPrintf(p, "*");
else
- packerPrintf(p, "%d-%d",
- spec->offset, spec->offset+spec->length-1);
+ packerPrintf(p, "%d-%d",
+ spec->offset, spec->offset + spec->length - 1);
}
/*
/* returns true if ranges are valid; inits HttpHdrContRange */
int
-httpHdrContRangeParseInit(HttpHdrContRange *range, const char *str)
+httpHdrContRangeParseInit(HttpHdrContRange * range, const char *str)
{
const char *p;
assert(range && str);
return 0;
if (*str == '*')
range->spec.offset = range->spec.length = range_spec_unknown;
- else
- if (!httpHdrRangeRespSpecParseInit(&range->spec, str, p-str))
+ else if (!httpHdrRangeRespSpecParseInit(&range->spec, str, p - str))
return 0;
p++;
if (*p == '*')
range->elength = range_spec_unknown;
- else
- if (!httpHeaderParseSize(p, &range->elength))
+ else if (!httpHeaderParseSize(p, &range->elength))
return 0;
- debug(68, 8) ("parsed content-range field: %d-%d / %d\n",
- range->spec.offset, range->spec.offset+range->spec.length-1,
+ debug(68, 8) ("parsed content-range field: %d-%d / %d\n",
+ range->spec.offset, range->spec.offset + range->spec.length - 1,
range->elength);
return 1;
}
void
-httpHdrContRangeDestroy(HttpHdrContRange *range)
+httpHdrContRangeDestroy(HttpHdrContRange * range)
{
assert(range);
memFree(MEM_HTTP_HDR_CONTENT_RANGE, range);
/*
- * $Id: HttpHdrExtField.cc,v 1.2 1998/03/08 21:02:07 rousskov Exp $
+ * $Id: HttpHdrExtField.cc,v 1.3 1998/04/06 22:32:06 wessels Exp $
*
* DEBUG: section 69 HTTP Header: Extension Field
* AUTHOR: Alex Rousskov
/* implementation */
static HttpHdrExtField *
-httpHdrExtFieldDoCreate(const char *name, int name_len,
+httpHdrExtFieldDoCreate(const char *name, int name_len,
const char *value, int value_len)
{
HttpHdrExtField *f = xcalloc(1, sizeof(HttpHdrExtField));
httpHdrExtFieldCreate(const char *name, const char *value)
{
return httpHdrExtFieldDoCreate(
- name, strlen(name),
+ name, strlen(name),
value, strlen(value));
}
}
return httpHdrExtFieldDoCreate(
field_start, name_end - field_start,
- value_start, field_end - value_start);
+ value_start, field_end - value_start);
}
void
/*
- * $Id: HttpHdrRange.cc,v 1.5 1998/03/11 21:11:47 rousskov Exp $
+ * $Id: HttpHdrRange.cc,v 1.6 1998/04/06 22:32:07 wessels Exp $
*
* DEBUG: section 64 HTTP Range Header
* AUTHOR: Alex Rousskov
#include "squid.h"
-#if 0
- Currently only byte ranges are supported
-
- Essentially, there are three types of byte ranges:
-
- 1) first-byte-pos "-" last-byte-pos // range
- 2) first-byte-pos "-" // trailer
- 3) "-" suffix-length // suffix (last length bytes)
-
-
- When Range field is parsed, we have no clue about the content length of
- the document. Thus, we simply code an "absent" part using range_spec_unknown
- constant.
-
- Note: when response length becomes known, we convert any range spec into
- type one above. (Canonization process).
-
-#endif
+/*
+ * Currently only byte ranges are supported
+ *
+ * Essentially, there are three types of byte ranges:
+ *
+ * 1) first-byte-pos "-" last-byte-pos // range
+ * 2) first-byte-pos "-" // trailer
+ * 3) "-" suffix-length // suffix (last length bytes)
+ *
+ *
+ * When Range field is parsed, we have no clue about the content
+ * length of the document. Thus, we simply code an "absent" part
+ * using range_spec_unknown constant.
+ *
+ * Note: when response length becomes known, we convert any range
+ * spec into type one above. (Canonization process).
+ */
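/*
 * Illustrative only: the three spec types and what canonization against
 * a known content length (say clen = 1000) turns them into:
 *
 *   "0-499"  range:    offset = 0,   length = 500   (already canonical)
 *   "500-"   trailer:  offset = 500, length = clen - offset = 500
 *   "-200"   suffix:   offset = clen - length = 800, length = 200
 */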
/* local constants */
#define known_spec(s) ((s) != range_spec_unknown)
#define size_min(a,b) ((a) <= (b) ? (a) : (b))
#define size_diff(a,b) ((a) >= (b) ? ((a)-(b)) : 0)
-static HttpHdrRangeSpec *httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec);
-static int httpHdrRangeSpecCanonize(HttpHdrRangeSpec *spec, size_t clen);
-static void httpHdrRangeSpecPackInto(const HttpHdrRangeSpec *spec, Packer *p);
+static HttpHdrRangeSpec *httpHdrRangeSpecDup(const HttpHdrRangeSpec * spec);
+static int httpHdrRangeSpecCanonize(HttpHdrRangeSpec * spec, size_t clen);
+static void httpHdrRangeSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p);
/* globals */
static int RangeParsedCount = 0;
static HttpHdrRangeSpec *
httpHdrRangeSpecParseCreate(const char *field, int flen)
{
- HttpHdrRangeSpec spec = { range_spec_unknown, range_spec_unknown };
+ HttpHdrRangeSpec spec =
+ {range_spec_unknown, range_spec_unknown};
const char *p;
if (flen < 2)
return NULL;
/* is it a suffix-byte-range-spec ? */
if (*field == '-') {
- if (!httpHeaderParseSize(field+1, &spec.length))
+ if (!httpHeaderParseSize(field + 1, &spec.length))
return NULL;
} else
- /* must have a '-' somewhere in _this_ field */
- if (!((p = strchr(field, '-')) || (p-field >= flen))) {
+ /* must have a '-' somewhere in _this_ field */
+ if (!((p = strchr(field, '-')) || (p - field >= flen))) {
debug(64, 2) ("ignoring invalid (missing '-') range-spec near: '%s'\n", field);
return NULL;
} else {
if (p - field < flen) {
size_t last_pos;
if (!httpHeaderParseSize(p, &last_pos))
- return NULL;
- spec.length = size_diff(last_pos+1, spec.offset);
+ return NULL;
+ spec.length = size_diff(last_pos + 1, spec.offset);
}
}
    /* we managed to parse, check if the result makes sense */
}
static void
-httpHdrRangeSpecDestroy(HttpHdrRangeSpec *spec)
+httpHdrRangeSpecDestroy(HttpHdrRangeSpec * spec)
{
memFree(MEM_HTTP_HDR_RANGE_SPEC, spec);
}
static HttpHdrRangeSpec *
-httpHdrRangeSpecDup(const HttpHdrRangeSpec *spec)
+httpHdrRangeSpecDup(const HttpHdrRangeSpec * spec)
{
HttpHdrRangeSpec *dup = httpHdrRangeSpecCreate();
dup->offset = spec->offset;
}
static void
-httpHdrRangeSpecPackInto(const HttpHdrRangeSpec *spec, Packer *p)
+httpHdrRangeSpecPackInto(const HttpHdrRangeSpec * spec, Packer * p)
{
- if (!known_spec(spec->offset)) /* suffix */
+ if (!known_spec(spec->offset)) /* suffix */
packerPrintf(p, "-%d", spec->length);
- else
- if (!known_spec(spec->length)) /* trailer */
+ else if (!known_spec(spec->length)) /* trailer */
packerPrintf(p, "%d-", spec->offset);
- else /* range */
- packerPrintf(p, "%d-%d",
- spec->offset, spec->offset+spec->length-1);
+ else /* range */
+ packerPrintf(p, "%d-%d",
+ spec->offset, spec->offset + spec->length - 1);
}
/* fills "absent" positions in range specification based on response body size
- returns true if the range is still valid
- range is valid if its intersection with [0,length-1] is not empty
-*/
+ * returns true if the range is still valid
+ * range is valid if its intersection with [0,length-1] is not empty
+ */
static int
-httpHdrRangeSpecCanonize(HttpHdrRangeSpec *spec, size_t clen)
+httpHdrRangeSpecCanonize(HttpHdrRangeSpec * spec, size_t clen)
{
- if (!known_spec(spec->offset)) /* suffix */
+ if (!known_spec(spec->offset)) /* suffix */
spec->offset = size_diff(clen, spec->length);
- else
- if (!known_spec(spec->length)) /* trailer */
+ else if (!known_spec(spec->length)) /* trailer */
spec->length = size_diff(clen, spec->offset);
/* we have a "range" now, adjust length if needed */
assert(known_spec(spec->length));
/* returns true if ranges are valid; inits HttpHdrRange */
int
-httpHdrRangeParseInit(HttpHdrRange *range, const char *str)
+httpHdrRangeParseInit(HttpHdrRange * range, const char *str)
{
const char *item;
const char *pos = NULL;
}
void
-httpHdrRangeDestroy(HttpHdrRange *range)
+httpHdrRangeDestroy(HttpHdrRange * range)
{
assert(range);
while (range->specs.count)
* - there is at least one range spec
*/
int
-httpHdrRangeCanonize(HttpHdrRange *range, size_t clen)
+httpHdrRangeCanonize(HttpHdrRange * range, size_t clen)
{
int i;
assert(range);
for (i = 0; i < range->specs.count; i++)
- if (!httpHdrRangeSpecCanonize(range->specs.items[i], clen))
+ if (!httpHdrRangeSpecCanonize(range->specs.items[i], clen))
return 0;
return range->specs.count;
}
/* searches for next range, returns true if found */
int
-httpHdrRangeGetSpec(const HttpHdrRange *range, HttpHdrRangeSpec *spec, int *pos)
+httpHdrRangeGetSpec(const HttpHdrRange * range, HttpHdrRangeSpec * spec, int *pos)
{
assert(range && spec);
assert(pos && *pos >= -1 && *pos < range->specs.count);
(*pos)++;
if (*pos < range->specs.count) {
- *spec = *(HttpHdrRangeSpec*)range->specs.items[*pos];
+ *spec = *(HttpHdrRangeSpec *) range->specs.items[*pos];
return 1;
}
spec->offset = spec->length = 0;
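/*
 * Illustrative usage sketch (process() is hypothetical): per the assert
 * above, iteration starts from pos == -1:
 *
 *   int pos = -1;
 *   HttpHdrRangeSpec spec;
 *   while (httpHdrRangeGetSpec(range, &spec, &pos))
 *       process(spec.offset, spec.length);
 */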
/*
- * $Id: HttpHeader.cc,v 1.30 1998/04/05 20:32:44 wessels Exp $
+ * $Id: HttpHeader.cc,v 1.31 1998/04/06 22:32:07 wessels Exp $
*
* DEBUG: section 55 HTTP Header
* AUTHOR: Alex Rousskov
{"Last-Modified", HDR_LAST_MODIFIED, ftDate_1123},
{"Location", HDR_LOCATION, ftStr},
{"Max-Forwards", HDR_MAX_FORWARDS, ftInt},
- {"Mime-Version", HDR_MIME_VERSION, ftStr}, /* for now */
+ {"Mime-Version", HDR_MIME_VERSION, ftStr}, /* for now */
{"Proxy-Authenticate", HDR_PROXY_AUTHENTICATE, ftStr},
{"Proxy-Connection", HDR_PROXY_CONNECTION, ftStr},
{"Public", HDR_PUBLIC, ftStr},
* headers with field values defined as #(values) in HTTP/1.1
 * Headers that are currently not recognized are commented out.
*/
-static HttpHeaderMask ListHeadersMask; /* set run-time using ListHeadersArr */
+static HttpHeaderMask ListHeadersMask; /* set run-time using ListHeadersArr */
static http_hdr_type ListHeadersArr[] =
{
HDR_ACCEPT,
/* HDR_EXPECT, HDR_TE, HDR_TRAILER */
};
-static HttpHeaderMask ReplyHeadersMask; /* set run-time using ReplyHeaders */
+static HttpHeaderMask ReplyHeadersMask; /* set run-time using ReplyHeaders */
static http_hdr_type ReplyHeadersArr[] =
{
HDR_ACCEPT, HDR_ACCEPT_CHARSET, HDR_ACCEPT_ENCODING, HDR_ACCEPT_LANGUAGE,
HDR_UPGRADE, HDR_WARNING, HDR_PROXY_CONNECTION, HDR_X_CACHE, HDR_OTHER
};
-static HttpHeaderMask RequestHeadersMask; /* set run-time using RequestHeaders */
+static HttpHeaderMask RequestHeadersMask; /* set run-time using RequestHeaders */
static http_hdr_type RequestHeadersArr[] =
{
HDR_RANGE, HDR_OTHER
static void httpHeaderDelAt(HttpHeader * hdr, HttpHeaderPos pos);
/* static int httpHeaderDelById(HttpHeader * hdr, http_hdr_type id); */
static void httpHeaderAddEntry(HttpHeader * hdr, HttpHeaderEntry * e);
-static String httpHeaderJoinEntries(const HttpHeader *hdr, http_hdr_type id);
+static String httpHeaderJoinEntries(const HttpHeader * hdr, http_hdr_type id);
static HttpHeaderEntry *httpHeaderEntryCreate(http_hdr_type id, const char *name, const char *value);
static void httpHeaderEntryDestroy(HttpHeaderEntry * e);
{
int i;
/* check that we have enough space for masks */
- assert(8*sizeof(HttpHeaderMask) >= HDR_ENUM_END);
+ assert(8 * sizeof(HttpHeaderMask) >= HDR_ENUM_END);
Headers = httpHeaderBuildFieldsInfo(HeadersAttrs, HDR_ENUM_END);
/* create masks */
httpHeaderCalcMask(&ListHeadersMask, (const int *) ListHeadersArr, countof(ListHeadersArr));
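/*
 * Sketch only, assuming Squid's CBIT_SET()/CBIT_TEST() bit-mask
 * helpers: with one bit per http_hdr_type (the assert above checks the
 * mask is wide enough), the calculation presumably reduces to
 *
 *   for (i = 0; i < count; ++i)
 *       CBIT_SET(*mask, enums[i]);
 *
 * after which membership tests against ListHeadersMask are O(1).
 */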
debug(55, 0) ("httpHeaderClean BUG: entry[%d] is invalid (%d). Ignored.\n",
pos, e->id);
else
- /* end of hack */
- /* yes, this destroy() leaves us in an incosistent state */
- httpHeaderEntryDestroy(e);
+ /* end of hack */
+	    /* yes, this destroy() leaves us in an inconsistent state */
+ httpHeaderEntryDestroy(e);
}
arrayClean(&hdr->entries);
}
/* use fresh entries to replace old ones */
void
-httpHeaderUpdate(HttpHeader *old, const HttpHeader *fresh)
+httpHeaderUpdate(HttpHeader * old, const HttpHeader * fresh)
{
HttpHeaderEntry *e;
HttpHeaderEntry *e_clone;
const char *field_end = field_start + strcspn(field_start, "\r\n");
if (!*field_end || field_end > header_end)
return httpHeaderReset(hdr); /* missing <CRLF> */
- e = httpHeaderEntryParseCreate(field_start, field_end);
+ e = httpHeaderEntryParseCreate(field_start, field_end);
if (e != NULL)
httpHeaderAddEntry(hdr, e);
else
if (*field_start == '\n')
field_start++;
}
- return 1; /* even if no fields where found, it is a valid header */
+    return 1;			/* even if no fields were found, it is a valid header */
}
/*
debug(55, 8) ("searching for next e in hdr %p from %d\n", hdr, *pos);
for ((*pos)++; *pos < hdr->entries.count; (*pos)++) {
if (hdr->entries.items[*pos])
- return hdr->entries.items[*pos];
+ return hdr->entries.items[*pos];
}
debug(55, 8) ("no more entries in hdr %p\n", hdr);
return NULL;
}
/* hm.. we thought it was there, but it was not found */
assert(0);
- return NULL; /* not reached */
+ return NULL; /* not reached */
}
/*
if (e->id == id)
result = e;
}
- assert(result); /* must be there! */
+ assert(result); /* must be there! */
return result;
}
int count = 0;
HttpHeaderPos pos = HttpHeaderInitPos;
HttpHeaderEntry *e;
- httpHeaderMaskInit(&hdr->mask); /* temporal inconsistency */
+ httpHeaderMaskInit(&hdr->mask); /* temporal inconsistency */
debug(55, 7) ("deleting '%s' fields in hdr %p\n", name, hdr);
while ((e = httpHeaderGetEntry(hdr, &pos))) {
if (!strCaseCmp(e->name, name)) {
}
static String
-httpHeaderJoinEntries(const HttpHeader *hdr, http_hdr_type id)
+httpHeaderJoinEntries(const HttpHeader * hdr, http_hdr_type id)
{
String s = StringNull;
HttpHeaderEntry *e;
httpHeaderPutInt(HttpHeader * hdr, http_hdr_type id, int number)
{
assert_eid(id);
- assert(Headers[id].type == ftInt); /* must be of an appropriatre type */
+    assert(Headers[id].type == ftInt);	/* must be of an appropriate type */
assert(number >= 0);
httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, xitoa(number)));
}
httpHeaderPutTime(HttpHeader * hdr, http_hdr_type id, time_t time)
{
assert_eid(id);
- assert(Headers[id].type == ftDate_1123); /* must be of an appropriatre type */
+    assert(Headers[id].type == ftDate_1123);	/* must be of an appropriate type */
assert(time >= 0);
httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, mkrfc1123(time)));
}
httpHeaderPutStr(HttpHeader * hdr, http_hdr_type id, const char *str)
{
assert_eid(id);
- assert(Headers[id].type == ftStr); /* must be of an appropriatre type */
+    assert(Headers[id].type == ftStr);	/* must be of an appropriate type */
assert(str);
httpHeaderAddEntry(hdr, httpHeaderEntryCreate(id, NULL, str));
}
assert_eid(id);
assert(Headers[id].type == ftStr); /* must be of an appropriate type */
if ((e = httpHeaderFindEntry(hdr, id))) {
- httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */
+ httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */
return strBuf(e->value);
}
return NULL;
assert_eid(id);
assert(Headers[id].type == ftStr); /* must be of an appropriate type */
if ((e = httpHeaderFindLastEntry(hdr, id))) {
- httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */
+ httpHeaderNoteParsedEntry(e->id, e->value, 0); /* no errors are possible */
return strBuf(e->value);
}
return NULL;
/* note: name_start == field_start */
const char *name_end = strchr(field_start, ':');
const int name_len = name_end ? name_end - field_start : 0;
- const char *value_start = field_start + name_len + 1; /* skip ':' */
+ const char *value_start = field_start + name_len + 1; /* skip ':' */
/* note: value_end == field_end */
HeaderEntryParsedCount++;
Headers[id].stat.parsCount++;
if (error) {
Headers[id].stat.errCount++;
- debug(55,2) ("cannot parse hdr field: '%s: %s'\n",
+ debug(55, 2) ("cannot parse hdr field: '%s: %s'\n",
strBuf(Headers[id].name), strBuf(context));
}
}
{
if (count)
storeAppendPrintf(sentry, "%2d\t %5d\t %5d\t %6.2f\n",
- idx, (int)val, count,
+ idx, (int) val, count,
xpercent(count, HeaderDestroyedCount));
}
/*
- * $Id: HttpHeaderTools.cc,v 1.7 1998/04/02 05:35:22 rousskov Exp $
+ * $Id: HttpHeaderTools.cc,v 1.8 1998/04/06 22:32:08 wessels Exp $
*
* DEBUG: section 66 HTTP Header Tools
* AUTHOR: Alex Rousskov
HttpHeaderFieldInfo *
-httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs *attrs, int count)
+httpHeaderBuildFieldsInfo(const HttpHeaderFieldAttrs * attrs, int count)
{
int i;
HttpHeaderFieldInfo *table = NULL;
/* sanity checks */
assert(id >= 0 && id < count);
assert(attrs[i].name);
- assert(info->id == 0 && info->type == 0); /* was not set before */
+ assert(info->id == 0 && info->type == 0); /* was not set before */
/* copy and init fields */
info->id = id;
info->type = attrs[i].type;
}
void
-httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo *table, int count)
+httpHeaderDestroyFieldsInfo(HttpHeaderFieldInfo * table, int count)
{
int i;
for (i = 0; i < count; ++i)
}
void
-httpHeaderMaskInit(HttpHeaderMask *mask)
+httpHeaderMaskInit(HttpHeaderMask * mask)
{
memset(mask, 0, sizeof(*mask));
}
/* calculates a bit mask of a given array */
void
-httpHeaderCalcMask(HttpHeaderMask *mask, const int *enums, int count)
+httpHeaderCalcMask(HttpHeaderMask * mask, const int *enums, int count)
{
int i;
assert(mask && enums);
int
-httpHeaderIdByName(const char *name, int name_len, const HttpHeaderFieldInfo *info, int end)
+httpHeaderIdByName(const char *name, int name_len, const HttpHeaderFieldInfo * info, int end)
{
int i;
for (i = 0; i < end; ++i) {
if (name_len >= 0 && name_len != strLen(info[i].name))
continue;
if (!strncasecmp(name, strBuf(info[i].name),
- name_len < 0 ? strLen(info[i].name) + 1 : name_len))
- return i;
+ name_len < 0 ? strLen(info[i].name) + 1 : name_len))
+ return i;
}
return -1;
}
{
#define SHORT_PREFIX_SIZE 512
LOCAL_ARRAY(char, buf, SHORT_PREFIX_SIZE);
- const int sz = 1 + (end ? end-str : strlen(str));
+ const int sz = 1 + (end ? end - str : strlen(str));
xstrncpy(buf, str, (sz > SHORT_PREFIX_SIZE) ? SHORT_PREFIX_SIZE : sz);
return buf;
}
*value = atoi(start);
if (!*value && !isdigit(*start)) {
debug(66, 2) ("failed to parse an int header field near '%s'\n", start);
- return 0;
+ return 0;
}
return 1;
}
int
-httpHeaderParseSize(const char *start, size_t *value)
+httpHeaderParseSize(const char *start, size_t * value)
{
int v;
const int res = httpHeaderParseInt(start, &v);
* parses a given string then packs compiled headers and compares the result
* with the original, reports discrepancies
*/
-void httpHeaderTestParser(const char *hstr)
+void
+httpHeaderTestParser(const char *hstr)
{
static int bug_count = 0;
int hstr_len;
if (!strncasecmp(hstr, "HTTP/", 5)) {
const char *p = strchr(hstr, '\n');
if (p)
- hstr = p+1;
+ hstr = p + 1;
}
/* skip invalid first line if any */
if (isspace(*hstr)) {
const char *p = strchr(hstr, '\n');
if (p)
- hstr = p+1;
+ hstr = p + 1;
}
hstr_len = strlen(hstr);
/* skip terminator if any */
if (strstr(hstr, "\n\r\n"))
hstr_len -= 2;
- else
- if (strstr(hstr, "\n\n"))
+ else if (strstr(hstr, "\n\n"))
hstr_len -= 1;
httpHeaderInit(&hdr);
/* debugLevels[55] = 8; */
- parse_success = httpHeaderParse(&hdr, hstr, hstr+hstr_len);
+ parse_success = httpHeaderParse(&hdr, hstr, hstr + hstr_len);
/* debugLevels[55] = 2; */
if (!parse_success) {
	debug(66, 2) ("TEST (%d): failed to parse a header: {\n%s}\n", bug_count, hstr);
if ((pos = abs(httpHeaderStrCmp(hstr, mb.buf, hstr_len)))) {
bug_count++;
debug(66, 2) ("TEST (%d): hdr parsing bug (pos: %d near '%s'): expected: {\n%s} got: {\n%s}\n",
- bug_count, pos, hstr+pos, hstr, mb.buf);
+ bug_count, pos, hstr + pos, hstr, mb.buf);
}
httpHeaderClean(&hdr);
packerClean(&p);
}
-/* like strncasecmp but ignores ws characters */
+/* like strncasecmp but ignores ws characters */
static int
httpHeaderStrCmp(const char *h1, const char *h2, int len)
{
while (1) {
const char c1 = toupper(h1[len1 += xcountws(h1 + len1)]);
const char c2 = toupper(h2[len2 += xcountws(h2 + len2)]);
- if (c1 < c2) return -len1;
- if (c1 > c2) return +len1;
+ if (c1 < c2)
+ return -len1;
+ if (c1 > c2)
+ return +len1;
if (!c1 && !c2)
return 0;
- if (c1) len1++;
- if (c2) len2++;
+ if (c1)
+ len1++;
+ if (c2)
+ len2++;
}
return 0;
}
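/*
 * Illustrative only: headers differing just in whitespace, e.g.
 * "Host: a" vs. "Host:a", compare equal (result 0); on a real mismatch
 * the result is +/- the offset into h1, which the caller above turns
 * into its "pos: %d near '%s'" diagnostic.
 */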
/*
- * $Id: HttpReply.cc,v 1.14 1998/04/05 20:32:44 wessels Exp $
+ * $Id: HttpReply.cc,v 1.15 1998/04/06 22:32:09 wessels Exp $
*
* DEBUG: section 58 HTTP Reply (Response)
* AUTHOR: Alex Rousskov
MemBuf
httpPacked304Reply(const HttpReply * rep)
{
- static const http_hdr_type ImsEntries[] = { HDR_DATE, HDR_CONTENT_LENGTH, HDR_CONTENT_TYPE, HDR_EXPIRES, HDR_LAST_MODIFIED, /* eof */ HDR_OTHER };
+ static const http_hdr_type ImsEntries[] =
+ {HDR_DATE, HDR_CONTENT_LENGTH, HDR_CONTENT_TYPE, HDR_EXPIRES, HDR_LAST_MODIFIED, /* eof */ HDR_OTHER};
http_hdr_type t;
MemBuf mb;
Packer p;
rep->content_range = httpHeaderGetContRange(hdr);
str = httpHeaderGetStr(hdr, HDR_PROXY_CONNECTION);
if (NULL == str)
- str = httpHeaderGetStr(hdr, HDR_CONNECTION); /* @?@ FIX ME */
+ str = httpHeaderGetStr(hdr, HDR_CONNECTION); /* @?@ FIX ME */
rep->proxy_keep_alive = str && 0 == strcasecmp(str, "Keep-Alive");
/* final adjustments */
/* The max-age directive takes priority over Expires, check it first */
if (rep->cache_control && rep->cache_control->max_age >= 0)
rep->expires = squid_curtime + rep->cache_control->max_age;
else
- /*
- * The HTTP/1.0 specs says that robust implementations should consider bad
- * or malformed Expires header as equivalent to "expires immediately."
- */
+ /*
+     * The HTTP/1.0 spec says that robust implementations should consider a
+     * bad or malformed Expires header as equivalent to "expires immediately."
+ */
if (rep->expires < 0 && httpHeaderHas(hdr, HDR_EXPIRES))
rep->expires = squid_curtime;
}
+
/*
- * $Id: String.cc,v 1.3 1998/03/08 20:42:00 rousskov Exp $
+ * $Id: String.cc,v 1.4 1998/04/06 22:32:10 wessels Exp $
*
* DEBUG: section 67 String
* AUTHOR: Duane Wessels
#include "squid.h"
static void
-stringInitBuf(String *s, size_t sz)
+stringInitBuf(String * s, size_t sz)
{
s->buf = memAllocBuf(sz, &sz);
assert(sz < 65536);
}
void
-stringInit(String *s, const char *str)
+stringInit(String * s, const char *str)
{
assert(s);
if (str)
}
void
-stringLimitInit(String *s, const char *str, int len)
+stringLimitInit(String * s, const char *str, int len)
{
assert(s && str);
- stringInitBuf(s, len+1);
+ stringInitBuf(s, len + 1);
s->len = len;
xmemcpy(s->buf, str, len);
s->buf[len] = '\0';
}
String
-stringDup(const String *s)
+stringDup(const String * s)
{
String dup;
assert(s);
}
void
-stringClean(String *s)
+stringClean(String * s)
{
assert(s);
if (s->buf)
}
void
-stringReset(String *s, const char *str)
+stringReset(String * s, const char *str)
{
stringClean(s);
stringInit(s, str);
}
void
-stringAppend(String *s, const char *str, int len)
+stringAppend(String * s, const char *str, int len)
{
assert(s && s->buf);
if (s->len + len < s->size) {
snew.len = s->len + len;
stringInitBuf(&snew, snew.len + 1);
xmemcpy(snew.buf, s->buf, s->len);
- xmemcpy(snew.buf+s->len, str, len);
+ xmemcpy(snew.buf + s->len, str, len);
snew.buf[snew.len] = '\0';
stringClean(s);
*s = snew;
/*
- * $Id: access_log.cc,v 1.26 1998/03/31 08:37:29 wessels Exp $
+ * $Id: access_log.cc,v 1.27 1998/04/06 22:32:10 wessels Exp $
*
* DEBUG: section 46 Access Log
* AUTHOR: Duane Wessels
}
static
-void
+void
fvdbFreeEntry(void *data)
{
fvdb_entry *fv = data;
/*
- * $Id: acl.cc,v 1.156 1998/04/04 01:44:00 kostas Exp $
+ * $Id: acl.cc,v 1.157 1998/04/06 22:32:11 wessels Exp $
*
* DEBUG: section 28 Access Control
* AUTHOR: Duane Wessels
static void
aclParseSnmpComm(void *data)
{
- acl_snmp_comm **q=data;
+ acl_snmp_comm **q = data;
acl_snmp_comm *p;
char *t;
t = strtok(NULL, w_space);
if (t) {
- p=xcalloc(1, sizeof(acl_snmp_comm));
- p->name=xstrdup(t);
- p->community=NULL;
- *q=p;
+ p = xcalloc(1, sizeof(acl_snmp_comm));
+ p->name = xstrdup(t);
+ p->community = NULL;
+ *q = p;
}
- t=strtok(NULL, w_space);
+ t = strtok(NULL, w_space);
return;
}
-
-
-
-
/*
- * $Id: client.cc,v 1.63 1998/04/02 04:45:03 rousskov Exp $
+ * $Id: client.cc,v 1.64 1998/04/06 22:32:12 wessels Exp $
*
* DEBUG: section 0 WWW Client
* AUTHOR: Harvest Derived
char *t;
strncpy(extra_hdrs, optarg, sizeof(extra_hdrs));
while ((t = strstr(extra_hdrs, "\\n")))
- *t = '\r', *(t+1) = '\n';
+ *t = '\r', *(t + 1) = '\n';
}
break;
case 'v':
/*
- * $Id: client_side.cc,v 1.256 1998/04/05 02:23:52 rousskov Exp $
+ * $Id: client_side.cc,v 1.257 1998/04/06 22:32:13 wessels Exp $
*
* DEBUG: section 33 Client-side Routines
* AUTHOR: Duane Wessels
/* handle internal objects */
if (*url == '/' && strncmp(url, "/squid-internal/", 16) == 0) {
/* prepend our name & port */
- http->uri = xstrdup(urlInternal(NULL, url+16));
+ http->uri = xstrdup(urlInternal(NULL, url + 16));
http->internal = 1;
}
    /* see if we are running in Config2.Accel.on; if so, we have to convert it to a URL */
/*
- * $Id: event.cc,v 1.12 1998/04/05 22:29:00 rousskov Exp $
+ * $Id: event.cc,v 1.13 1998/04/06 22:32:14 wessels Exp $
*
* DEBUG: section 41 Event Processing
* AUTHOR: Henrik Nordstrom
eventAddIsh(const char *name, EVH * func, void *arg, time_t delta_ish)
{
if (delta_ish >= 3) {
- const time_t two_third = (2*delta_ish)/3;
+ const time_t two_third = (2 * delta_ish) / 3;
delta_ish = two_third + (squid_random() % two_third);
}
eventAdd(name, func, arg, delta_ish);
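/*
 * Illustrative only: for delta_ish = 3600 sec, two_third = 2400, so the
 * event fires after 2400 + (squid_random() % 2400) seconds, i.e.
 * uniformly within [2400, 4800) rather than at one fixed offset.
 */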
/*
- * $Id: main.cc,v 1.243 1998/04/03 22:05:12 rousskov Exp $
+ * $Id: main.cc,v 1.244 1998/04/06 22:32:15 wessels Exp $
*
* DEBUG: section 1 Startup and Main Loop
* AUTHOR: Harvest Derived
pconnInit();
eventInit();
}
-
serverConnectionsOpen();
if (theOutIcpConnection >= 0 && (!Config2.Accel.on || Config.onoff.accel_with_proxy))
neighbors_open(theOutIcpConnection);
extern void peerSelectInit(void);
/* peer_digest.c */
-extern void peerDigestValidate(peer *p);
-extern void peerDigestRequest(peer *p);
+extern void peerDigestValidate(peer * p);
+extern void peerDigestRequest(peer * p);
extern void protoDispatch(int, StoreEntry *, request_t *);
/*
- * $Id: snmp_core.cc,v 1.1 1998/04/04 01:44:04 kostas Exp $
+ * $Id: snmp_core.cc,v 1.2 1998/04/06 22:32:18 wessels Exp $
*
* DEBUG: section 49 SNMP support
* AUTHOR: Kostas Anagnostakis
}
void
-snmpAgentParseDone(int errstat, void * data)
+snmpAgentParseDone(int errstat, void *data)
{
- snmp_request_t *snmp_rq=(snmp_request_t *)data;
+ snmp_request_t *snmp_rq = (snmp_request_t *) data;
LOCAL_ARRAY(char, deb_line, 4096);
int sock = snmp_rq->sock;
long this_reqid = snmp_rq->reqid;
if (memcmp(&snmp_rq->from, &local_snmpd, sizeof(struct sockaddr_in)) == 0) {
/* look it up */
- if (snmpFwd_removePending(&snmp_rq->from, this_reqid)) { /* failed */
+ if (snmpFwd_removePending(&snmp_rq->from, this_reqid)) { /* failed */
debug(49, 2) ("snmp: bogus response from %s.\n",
inet_ntoa(snmp_rq->from.sin_addr));
if (snmp_rq->community)
void
snmpAgentParse(void *data)
{
- snmp_request_t * rq=(snmp_request_t *)data;
+ snmp_request_t *rq = (snmp_request_t *) data;
u_char *buf = rq->buf;
int len = rq->len;
PDU = snmp_pdu_create(0);
Community = snmp_parse(Session, PDU, buf, len);
- if (!snmp_coexist_V2toV1(PDU)) { /* incompatibility */
- debug(49, 3) ("snmpAgentParse: Incompatible V2 packet.\n");
- snmp_free_pdu(PDU);
- snmpAgentParseDone(0, rq);
- return;
+ if (!snmp_coexist_V2toV1(PDU)) { /* incompatibility */
+ debug(49, 3) ("snmpAgentParse: Incompatible V2 packet.\n");
+ snmp_free_pdu(PDU);
+ snmpAgentParseDone(0, rq);
+ return;
}
rq->community = Community;
rq->PDU = PDU;
debug(49, 5) ("snmpAgentParse: reqid=[%d]\n", PDU->reqid);
if (!Community) {
- debug(49, 2) ("snmpAgentParse: WARNING: Could not parse community\n");
+ debug(49, 2) ("snmpAgentParse: WARNING: Could not parse community\n");
- snmp_free_pdu(PDU);
- snmpAgentParseDone(0, rq);
- return;
+ snmp_free_pdu(PDU);
+ snmpAgentParseDone(0, rq);
+ return;
}
snmpAclCheckStart(rq);
}
/* Create a response */
Answer = snmp_pdu_create(SNMP_PDU_RESPONSE);
if (Answer == NULL)
- return (NULL);
+ return (NULL);
Answer->reqid = PDU->reqid;
Answer->errindex = 0;
if (PDU->command == SNMP_PDU_GET) {
- RespVars = &(Answer->variables);
- /* Loop through all variables */
- for (VarPtrP = &(PDU->variables);
- *VarPtrP;
- VarPtrP = &((*VarPtrP)->next_variable)) {
- VarPtr = *VarPtrP;
-
- index++;
-
- /* Find the parsing function for this variable */
- ParseFn = oidlist_Find(VarPtr->name, VarPtr->name_length);
-
- if (ParseFn == NULL) {
- Answer->errstat = SNMP_ERR_NOSUCHNAME;
- debug(49, 5) ("snmpAgentResponse: No such oid. ");
- } else
- VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat));
-
- /* Was there an error? */
- if ((Answer->errstat != SNMP_ERR_NOERROR) ||
- (VarNew == NULL)) {
- Answer->errindex = index;
- debug(49, 5) ("snmpAgentParse: successful.\n");
- /* Just copy the rest of the variables. Quickly. */
- *RespVars = VarPtr;
- *VarPtrP = NULL;
- return (Answer);
- }
- /* No error. Insert this var at the end, and move on to the next.
- */
- *RespVars = VarNew;
- RespVars = &(VarNew->next_variable);
- }
- return (Answer);
+ RespVars = &(Answer->variables);
+ /* Loop through all variables */
+ for (VarPtrP = &(PDU->variables);
+ *VarPtrP;
+ VarPtrP = &((*VarPtrP)->next_variable)) {
+ VarPtr = *VarPtrP;
+
+ index++;
+
+ /* Find the parsing function for this variable */
+ ParseFn = oidlist_Find(VarPtr->name, VarPtr->name_length);
+
+ if (ParseFn == NULL) {
+ Answer->errstat = SNMP_ERR_NOSUCHNAME;
+ debug(49, 5) ("snmpAgentResponse: No such oid. ");
+ } else
+ VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat));
+
+ /* Was there an error? */
+ if ((Answer->errstat != SNMP_ERR_NOERROR) ||
+ (VarNew == NULL)) {
+ Answer->errindex = index;
+		debug(49, 5) ("snmpAgentResponse: error at index %d.\n", index);
+ /* Just copy the rest of the variables. Quickly. */
+ *RespVars = VarPtr;
+ *VarPtrP = NULL;
+ return (Answer);
+ }
+ /* No error. Insert this var at the end, and move on to the next.
+ */
+ *RespVars = VarNew;
+ RespVars = &(VarNew->next_variable);
+ }
+ return (Answer);
} else if (PDU->command == SNMP_PDU_GETNEXT) {
- oid *TmpOidName;
- int TmpOidNameLen = 0;
-
- /* Find the next OID. */
- VarPtr = PDU->variables;
-
- ParseFn = oidlist_Next(VarPtr->name, VarPtr->name_length,
- &(TmpOidName), (snint *) & (TmpOidNameLen));
-
- if (ParseFn == NULL) {
- Answer->errstat = SNMP_ERR_NOSUCHNAME;
- debug(49, 5) ("snmpAgentResponse: No such oid: ");
- snmpDebugOid(5, VarPtr->name, VarPtr->name_length);
- } else {
- xfree(VarPtr->name);
- VarPtr->name = TmpOidName;
- VarPtr->name_length = TmpOidNameLen;
- VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat));
- }
-
- /* Was there an error? */
- if (Answer->errstat != SNMP_ERR_NOERROR) {
- Answer->errindex = 1;
-
- /* Just copy this variable */
- Answer->variables = VarPtr;
- PDU->variables = NULL;
- } else {
- Answer->variables = VarNew;
- }
-
- /* Done. Return this PDU */
- return (Answer);
- } /* end SNMP_PDU_GETNEXT */
+ oid *TmpOidName;
+ int TmpOidNameLen = 0;
+
+ /* Find the next OID. */
+ VarPtr = PDU->variables;
+
+ ParseFn = oidlist_Next(VarPtr->name, VarPtr->name_length,
+ &(TmpOidName), (snint *) & (TmpOidNameLen));
+
+ if (ParseFn == NULL) {
+ Answer->errstat = SNMP_ERR_NOSUCHNAME;
+ debug(49, 5) ("snmpAgentResponse: No such oid: ");
+ snmpDebugOid(5, VarPtr->name, VarPtr->name_length);
+ } else {
+ xfree(VarPtr->name);
+ VarPtr->name = TmpOidName;
+ VarPtr->name_length = TmpOidNameLen;
+ VarNew = (*ParseFn) (VarPtr, (snint *) & (Answer->errstat));
+ }
+
+ /* Was there an error? */
+ if (Answer->errstat != SNMP_ERR_NOERROR) {
+ Answer->errindex = 1;
+
+ /* Just copy this variable */
+ Answer->variables = VarPtr;
+ PDU->variables = NULL;
+ } else {
+ Answer->variables = VarNew;
+ }
+
+ /* Done. Return this PDU */
+ return (Answer);
+ } /* end SNMP_PDU_GETNEXT */
debug(49, 5) ("snmpAgentResponse: Ignoring PDU %d unknown command\n", PDU->command);
snmp_free_pdu(Answer);
return (NULL);
int x;
objid[0] = '\0';
for (x = 0; x < Len; x++) {
- snprintf(mbuf, 16, ".%u", (unsigned char) Name[x]);
- strcat(objid, mbuf);
+ snprintf(mbuf, 16, ".%u", (unsigned char) Name[x]);
+ strcat(objid, mbuf);
}
debug(49, lvl) (" oid = %s\n", objid);
}
/* Compare the first M bytes. */
while (m) {
- if (*aptr < *bptr)
- return (-1);
- if (*aptr++ > *bptr++)
- return (1);
- m--;
+ if (*aptr < *bptr)
+ return (-1);
+ if (*aptr++ > *bptr++)
+ return (1);
+ m--;
}
/* The first M bytes were identical. So, they share the same
* root. The shorter one must come first.
*/
if (ALen < BLen)
- return (-1);
+ return (-1);
if (ALen > BLen)
- return (1);
+ return (1);
/* Same length, all bytes identical. Must be the same OID. */
return (0);
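/*
 * Illustrative only: under this ordering .1.3.6 sorts before .1.3.6.1
 * (shared root, shorter first), while .1.3.5 sorts before .1.3.6 on the
 * first differing byte.
 */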
/* Compare the first M bytes. */
while (count != m) {
- if (*aptr < *bptr)
- return (-1);
- if (*aptr++ > *bptr++)
- return (1);
- count++;
+ if (*aptr < *bptr)
+ return (-1);
+ if (*aptr++ > *bptr++)
+ return (1);
+ count++;
}
if (m == CompLen)
- return (0);
+ return (0);
if (ALen < BLen)
- return (-1);
+ return (-1);
if (ALen > BLen)
- return (1);
+ return (1);
/* Same length, all bytes identical. Must be the same OID. */
return (0);
Ans = (oid *) xmalloc(sizeof(oid) * ALen);
if (Ans)
- memcpy(Ans, A, (sizeof(oid) * ALen));
+ memcpy(Ans, A, (sizeof(oid) * ALen));
return (Ans);
}
int ret;
debug(49, 7) ("oidlist_Find: Called.\n ");
- snmpDebugOid(7, Src, SrcLen);
+ snmpDebugOid(7, Src, SrcLen);
for (Ptr = squidMIBList; Ptr->GetFn; Ptr++) {
- ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen);
+ ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen);
- if (!ret) {
+ if (!ret) {
- /* Cool. We found the mib it's in. Let it find the function.
- */
- debug(49, 7) ("oidlist_Find: found, returning GetFn Ptr! \n");
+ /* Cool. We found the mib it's in. Let it find the function.
+ */
+ debug(49, 7) ("oidlist_Find: found, returning GetFn Ptr! \n");
- return ((*Ptr->GetFn) (Src, SrcLen));
- }
- if (ret < 0) {
- debug(49, 7) ("oidlist_Find: We just passed it, so it doesn't exist.\n ");
- /* We just passed it, so it doesn't exist. */
- return (NULL);
- }
+ return ((*Ptr->GetFn) (Src, SrcLen));
+ }
+ if (ret < 0) {
+ debug(49, 7) ("oidlist_Find: We just passed it, so it doesn't exist.\n ");
+ /* We just passed it, so it doesn't exist. */
+ return (NULL);
+ }
}
debug(49, 5) ("oidlist_Find: the request was past the end. It doesn't exist.\n");
for (Ptr = squidMIBList; Ptr->GetNextFn; Ptr++) {
- /* Only look at as much as we have stored */
- ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen);
-
- if (!ret) {
- debug(49, 6) ("oidlist_Next: Checking MIB\n");
-
- /* Cool. We found the mib it's in. Ask it.
- */
- while (Ptr != NULL && Ptr->GetNextFn) {
- Fn = ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP));
- if (Fn == NULL) {
- /* If this returned NULL, we're looking for the first
- * in the next MIB.
- */
- debug(49, 6) ("oidlist_Next: Not in this entry. Trying next.\n");
- Ptr++;
- continue;
- }
- return Fn;
- }
- /* Return what we found. NULL if it wasn't in the MIB, and there
- * were no more MIBs.
- */
- debug(49, 3) ("oidlist_Next: No next mib.\n");
- return NULL;
- }
- if (ret < 0) {
- /* We just passed the mib it would be in. Return
- * the next in this MIB.
- */
- debug(49, 3) ("oidlist_Next: Passed mib. Checking this one.\n");
- return ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP));
- }
+ /* Only look at as much as we have stored */
+ ret = oidncmp(Src, SrcLen, Ptr->Name, Ptr->NameLen, Ptr->NameLen);
+
+ if (!ret) {
+ debug(49, 6) ("oidlist_Next: Checking MIB\n");
+
+ /* Cool. We found the mib it's in. Ask it.
+ */
+ while (Ptr != NULL && Ptr->GetNextFn) {
+ Fn = ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP));
+ if (Fn == NULL) {
+ /* If this returned NULL, we're looking for the first
+ * in the next MIB.
+ */
+ debug(49, 6) ("oidlist_Next: Not in this entry. Trying next.\n");
+ Ptr++;
+ continue;
+ }
+ return Fn;
+ }
+ /* Return what we found. NULL if it wasn't in the MIB, and there
+ * were no more MIBs.
+ */
+ debug(49, 3) ("oidlist_Next: No next mib.\n");
+ return NULL;
+ }
+ if (ret < 0) {
+ /* We just passed the mib it would be in. Return
+ * the next in this MIB.
+ */
+ debug(49, 3) ("oidlist_Next: Passed mib. Checking this one.\n");
+ return ((*Ptr->GetNextFn) (Src, SrcLen, DestP, DestLenP));
+ }
}
/* We get here if the request was past the end. It doesn't exist. */
return &maddr;
}
-int
+int
fd_getMax()
{
fde *f;
int cnt = 0, num = 0;
while (cnt < Squid_MaxFD) {
- f = &fd_table[cnt++];
- if (!f->open)
- continue;
- if (f->type != FD_SOCKET)
- num++;
+ f = &fd_table[cnt++];
+ if (!f->open)
+ continue;
+ if (f->type != FD_SOCKET)
+ num++;
}
return num;
}
-
/*
- * $Id: stat.cc,v 1.223 1998/04/06 22:00:29 rousskov Exp $
+ * $Id: stat.cc,v 1.224 1998/04/06 22:32:20 wessels Exp $
*
* DEBUG: section 18 Cache Manager Statistics
* AUTHOR: Harvest Derived
int
get_median_svc(int interval, int which)
{
- return(int) statMedianSvc(interval, which);
+ return (int) statMedianSvc(interval, which);
}
StatCounters *
/*
- * $Id: store_digest.cc,v 1.2 1998/04/03 22:05:14 rousskov Exp $
+ * $Id: store_digest.cc,v 1.3 1998/04/06 22:32:21 wessels Exp $
*
* DEBUG: section 71 Store Digest Manager
* AUTHOR: Alex Rousskov
typedef struct {
StoreDigestCBlock cblock;
- int rebuild_lock; /* bucket number */
- StoreEntry *rewrite_lock; /* store entry with the digest */
- const char *other_lock; /* used buy external modules to pause rebuilds and rewrites */
+ int rebuild_lock; /* bucket number */
+ StoreEntry *rewrite_lock; /* store entry with the digest */
+    const char *other_lock;	/* used by external modules to pause rebuilds and rewrites */
int rebuild_offset;
int rewrite_offset;
int rebuild_count;
- int rewrite_count;
+ int rewrite_count;
} StoreDigestState;
/*
/* fake url suffix */
static const char *StoreDigestUrl = "cache_digest";
/* how often we want to rebuild the digest, seconds */
-static const time_t StoreDigestRebuildPeriod = 60*60;
+static const time_t StoreDigestRebuildPeriod = 60 * 60;
/* how often we want to rewrite the digest, seconds */
-static const time_t StoreDigestRewritePeriod = 60*60;
+static const time_t StoreDigestRewritePeriod = 60 * 60;
/* how many bytes to swap out at a time */
static const int StoreDigestSwapOutChunkSize = SM_PAGE_SIZE;
/* portion (0,1] of a hash table to be rescanned at a time */
static void storeDigestRebuildFinish();
static void storeDigestRebuildStep(void *datanotused);
static void storeDigestRewrite();
-static void storeDigestRewriteFinish(StoreEntry *e);
-static void storeDigestSwapOutStep(StoreEntry *e);
-static void storeDigestCBlockSwapOut(StoreEntry *e);
+static void storeDigestRewriteFinish(StoreEntry * e);
+static void storeDigestSwapOutStep(StoreEntry * e);
+static void storeDigestCBlockSwapOut(StoreEntry * e);
void
* Use 1.5*max#entries because 2*max#entries gives about 40% utilization.
*/
#if SQUID_MAINTAIN_CACHE_DIGEST
- const int cap = (int)(1.5 * Config.Swap.maxSize / Config.Store.avgObjectSize);
+ const int cap = (int) (1.5 * Config.Swap.maxSize / Config.Store.avgObjectSize);
store_digest = cacheDigestCreate(cap);
debug(71, 1) ("Using %d byte cache digest; rebuild/rewrite every %d/%d sec\n",
store_digest->mask_size, StoreDigestRebuildPeriod, StoreDigestRewritePeriod);
#endif
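/*
 * Illustrative only: if maxSize / avgObjectSize works out to roughly
 * 100,000 objects, cap = 1.5 * 100,000 = 150,000 digest entries; the
 * 1.5 factor accepts a denser mask than the ~40% bit utilization
 * measured at 2 * max#entries, in exchange for a smaller digest.
 */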
memset(&sd_state, 0, sizeof(sd_state));
cachemgrRegister("store_digest", "Store Digest",
- storeDigestReport, 0);
+ storeDigestReport, 0);
}
/* you probably want to call this before storeDigestRewriteContinue() */
/* externally initiated rewrite (inits store entry and pauses) */
void
-storeDigestRewriteStart(const char *initiator) {
+storeDigestRewriteStart(const char *initiator)
+{
assert(initiator);
assert(!sd_state.other_lock);
sd_state.other_lock = initiator;
/* continue externally initiated rewrite */
void
-storeDigestRewriteContinue(const char *initiator) {
+storeDigestRewriteContinue(const char *initiator)
+{
assert(initiator);
assert(!strcmp(sd_state.other_lock, initiator));
assert(sd_state.rewrite_lock);
sd_state.rebuild_offset = 0;
/* not clean()! */
cacheDigestClear(store_digest);
- debug(71, 2) ("storeDigestRebuild: start rebuild #%d\n", sd_state.rebuild_count+1);
+ debug(71, 2) ("storeDigestRebuild: start rebuild #%d\n", sd_state.rebuild_count + 1);
storeDigestRebuildStep(NULL);
}
static void
storeDigestRebuildStep(void *datanotused)
{
- int bcount = (int)ceil(store_hash_buckets*StoreDigestRebuildChunkPercent);
+ int bcount = (int) ceil(store_hash_buckets * StoreDigestRebuildChunkPercent);
assert(sd_state.rebuild_lock);
if (sd_state.rebuild_offset + bcount > store_hash_buckets)
bcount = store_hash_buckets - sd_state.rebuild_offset;
debug(71, 1) ("storeDigestRewrite: overlap detected, consider increasing rewrite period\n");
return;
}
- debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count+1);
+ debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count + 1);
/* make new store entry */
snprintf(url, sizeof(url), "http://%s:%d/squid-internal/%s",
getMyHostname(), Config.Port.http->i, StoreDigestUrl);
e->mem_obj->request = requestLink(urlParse(METHOD_GET, url));
httpReplyReset(e->mem_obj->reply);
httpReplySetHeaders(e->mem_obj->reply, 1.0, 200, "Cache Digest OK",
- "application/cache-digest", store_digest->mask_size+sizeof(sd_state.cblock),
+ "application/cache-digest", store_digest->mask_size + sizeof(sd_state.cblock),
squid_curtime, squid_curtime + StoreDigestRewritePeriod);
storeBuffer(e);
httpReplySwapOut(e->mem_obj->reply, e);
/* finishes swap out sequence for the digest; schedules next rewrite */
static void
-storeDigestRewriteFinish(StoreEntry *e)
+storeDigestRewriteFinish(StoreEntry * e)
{
assert(e);
assert(e == sd_state.rewrite_lock);
/* swaps out one digest "chunk" per invocation; schedules next swap out */
static void
-storeDigestSwapOutStep(StoreEntry *e)
+storeDigestSwapOutStep(StoreEntry * e)
{
int chunk_size = StoreDigestSwapOutChunkSize;
assert(e);
if (sd_state.rewrite_offset >= store_digest->mask_size)
storeDigestRewriteFinish(e);
else
- eventAdd("storeDigestSwapOutStep", (EVH*) storeDigestSwapOutStep, e, 0);
+ eventAdd("storeDigestSwapOutStep", (EVH *) storeDigestSwapOutStep, e, 0);
}
static void
-storeDigestCBlockSwapOut(StoreEntry *e)
+storeDigestCBlockSwapOut(StoreEntry * e)
{
/*
* when we actually start using control block, do not forget to convert to
* network byte order if needed
*/
memset(&sd_state.cblock, 0, sizeof(sd_state.cblock));
- storeAppend(e, (char*) &sd_state.cblock, sizeof(sd_state.cblock));
+ storeAppend(e, (char *) &sd_state.cblock, sizeof(sd_state.cblock));
}
void
-storeDigestReport(StoreEntry *e)
+storeDigestReport(StoreEntry * e)
{
if (store_digest) {
cacheDigestReport(store_digest, "store", e);
storeAppendPrintf(e, "store digest: disabled.\n");
}
}
-
/*
- * $Id: store_rebuild.cc,v 1.29 1998/04/04 04:50:32 wessels Exp $
+ * $Id: store_rebuild.cc,v 1.30 1998/04/06 22:32:21 wessels Exp $
*
* DEBUG: section 20 Store Rebuild Routines
* AUTHOR: Duane Wessels
e->ping_status = PING_NONE;
EBIT_CLR(e->flag, ENTRY_VALIDATED);
storeDirMapBitSet(e->swap_file_number);
- storeHashInsert(e, key); /* do it after we clear KEY_PRIVATE */
+ storeHashInsert(e, key); /* do it after we clear KEY_PRIVATE */
return e;
}
-
-
struct _acl_ip_data {
struct in_addr addr1; /* if addr2 non-zero then its a range */
struct in_addr addr2;
/*
- * $Id: test_cache_digest.cc,v 1.16 1998/04/02 17:11:28 rousskov Exp $
+ * $Id: test_cache_digest.cc,v 1.17 1998/04/06 22:32:22 wessels Exp $
*
* AUTHOR: Alex Rousskov
*
Cache *peer;
CacheQueryStats qstats;
int count; /* #currently cached entries */
- int req_count; /* #requests to this cache */
+ int req_count; /* #requests to this cache */
int bad_add_count; /* #duplicate adds */
int bad_del_count; /* #dels with no prior add */
};
typedef struct {
cache_key key[MD5_DIGEST_CHARS];
time_t timestamp;
- short int use_icp; /* true/false */
+ short int use_icp; /* true/false */
} RawAccessLogEntry;
-typedef enum { frError = -2, frMore = -1, frEof = 0, frOk = 1 } fr_result;
+typedef enum {
+ frError = -2, frMore = -1, frEof = 0, frOk = 1
+} fr_result;
typedef struct _FileIterator FileIterator;
-typedef fr_result (*FI_READER)(FileIterator *fi);
+typedef fr_result(*FI_READER) (FileIterator * fi);
struct _FileIterator {
const char *fname;
FILE *file;
- time_t inner_time; /* timestamp of the current entry */
- time_t time_offset; /* to adjust time set by reader */
- int line_count; /* number of lines scanned */
- int bad_line_count; /* number of parsing errors */
- int time_warp_count;/* number of out-of-order entries in the file */
- FI_READER reader; /* reads next entry and updates inner_time */
- void *entry; /* buffer for the current entry, freed with xfree() */
+ time_t inner_time; /* timestamp of the current entry */
+ time_t time_offset; /* to adjust time set by reader */
+ int line_count; /* number of lines scanned */
+ int bad_line_count; /* number of parsing errors */
+ int time_warp_count; /* number of out-of-order entries in the file */
+ FI_READER reader; /* reads next entry and updates inner_time */
+ void *entry; /* buffer for the current entry, freed with xfree() */
};
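/*
 * Illustrative only, inferred from fileIteratorAdvance() below: a
 * reader returns frOk and sets fi->inner_time on success, frError for
 * an unparseable line (counted in bad_line_count), and frEof at end of
 * file, after which inner_time is set to -1.
 */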
/* globals */
-static time_t cur_time = -1; /* timestamp of the current log entry */
+static time_t cur_time = -1; /* timestamp of the current log entry */
#if 0
/* FileIterator */
-static void fileIteratorAdvance(FileIterator *fi);
+static void fileIteratorAdvance(FileIterator * fi);
static FileIterator *
fileIteratorCreate(const char *fname, FI_READER reader)
}
static void
-fileIteratorDestroy(FileIterator *fi)
+fileIteratorDestroy(FileIterator * fi)
{
assert(fi);
if (fi->file) {
}
static void
-fileIteratorSetCurTime(FileIterator *fi, time_t ct)
+fileIteratorSetCurTime(FileIterator * fi, time_t ct)
{
assert(fi);
assert(fi->inner_time > 0);
}
static void
-fileIteratorAdvance(FileIterator *fi)
+fileIteratorAdvance(FileIterator * fi)
{
int res;
assert(fi);
fi->inner_time = last_time;
else
fi->inner_time += fi->time_offset;
- if (res == frError)
+ if (res == frError)
fi->bad_line_count++;
- else
- if (res == frEof) {
+ else if (res == frEof) {
fprintf(stderr, "exhausted %s (%d entries) at %s",
fi->fname, fi->line_count, ctime(&fi->inner_time));
fi->inner_time = -1;
- } else
- if (fi->inner_time < last_time) {
+ } else if (fi->inner_time < last_time) {
assert(last_time >= 0);
fi->time_warp_count++;
fi->inner_time = last_time;
hash = cache->hash;
/* destroy hash table contents */
for (e = hash_first(hash); e; e = hash_next(hash)) {
- hash_remove_link(hash, (hash_link*)e);
+ hash_remove_link(hash, (hash_link *) e);
cacheEntryDestroy(e);
}
/* destroy the hash table itself */
- hashFreeMemory(hash);
+ hashFreeMemory(hash);
if (cache->digest)
cacheDigestDestroy(cache->digest);
xfree(cache);
if (cache->digest)
cacheDigestDestroy(cache->digest);
hash = cache->hash;
- cache->digest = cacheDigestCreate(2 * cache->count + 1); /* 50% utilization */
+ cache->digest = cacheDigestCreate(2 * cache->count + 1); /* 50% utilization */
if (!cache->count)
return;
gettimeofday(&t_start, NULL);
}
static void
-cacheQueryReport(Cache * cache, CacheQueryStats *stats)
+cacheQueryReport(Cache * cache, CacheQueryStats * stats)
{
- fprintf(stdout, "%s: peer queries: %d (%d%%)\n",
- cache->name,
+ fprintf(stdout, "%s: peer queries: %d (%d%%)\n",
+ cache->name,
stats->query_count, xpercentInt(stats->query_count, cache->req_count)
);
fprintf(stdout, "%s: t-hit: %d (%d%%) t-miss: %d (%d%%) t-*: %d (%d%%)\n",
- cache->name,
+ cache->name,
stats->true_hit_count, xpercentInt(stats->true_hit_count, stats->query_count),
stats->true_miss_count, xpercentInt(stats->true_miss_count, stats->query_count),
stats->true_hit_count + stats->true_miss_count,
xpercentInt(stats->true_hit_count + stats->true_miss_count, stats->query_count)
);
fprintf(stdout, "%s: f-hit: %d (%d%%) f-miss: %d (%d%%) f-*: %d (%d%%)\n",
- cache->name,
+ cache->name,
stats->false_hit_count, xpercentInt(stats->false_hit_count, stats->query_count),
stats->false_miss_count, xpercentInt(stats->false_miss_count, stats->query_count),
stats->false_hit_count + stats->false_miss_count,
static void
cacheReport(Cache * cache)
{
- fprintf(stdout, "%s: entries: %d reqs: %d bad-add: %d bad-del: %d\n",
+ fprintf(stdout, "%s: entries: %d reqs: %d bad-add: %d bad-del: %d\n",
cache->name, cache->count, cache->req_count,
cache->bad_add_count, cache->bad_del_count);
}
static void
-cacheFetch(Cache *cache, const RawAccessLogEntry *e)
+cacheFetch(Cache * cache, const RawAccessLogEntry * e)
{
assert(e);
cache->req_count++;
memset(fi->entry, 0, sizeof(RawAccessLogEntry));
entry = fi->entry;
if (!fgets(buf, sizeof(buf), fi->file))
- return frEof; /* eof */
- entry->timestamp = fi->inner_time = (time_t)atoi(buf);
+ return frEof; /* eof */
+ entry->timestamp = fi->inner_time = (time_t) atoi(buf);
url = strstr(buf, "://");
hier = url ? strstr(url, " - ") : NULL;
* fname, scanned_count, method, buf); */
return frError;
}
- while (*url) url--;
+ while (*url)
+ url--;
url++;
*hier = '\0';
hier += 3;
*strchr(hier, '/') = '\0';
/*fprintf(stdout, "%s:%d: %s %s %s\n",
* fname, count, method, url, hier); */
- entry->use_icp = strcmp(hier, "NONE");
- /* no ICP lookup for these status codes */
-/* strcmp(hier, "NONE") &&
- strcmp(hier, "DIRECT") &&
- strcmp(hier, "FIREWALL_IP_DIRECT") &&
- strcmp(hier, "LOCAL_IP_DIRECT") &&
- strcmp(hier, "NO_DIRECT_FAIL") &&
- strcmp(hier, "NO_PARENT_DIRECT") &&
- strcmp(hier, "SINGLE_PARENT") &&
- strcmp(hier, "PASSTHROUGH_PARENT") &&
- strcmp(hier, "SSL_PARENT_MISS") &&
- strcmp(hier, "DEFAULT_PARENT");
-*/
+ entry->use_icp = strcmp(hier, "NONE");
+ /* no ICP lookup for these status codes */
+/* strcmp(hier, "NONE") &&
+ * strcmp(hier, "DIRECT") &&
+ * strcmp(hier, "FIREWALL_IP_DIRECT") &&
+ * strcmp(hier, "LOCAL_IP_DIRECT") &&
+ * strcmp(hier, "NO_DIRECT_FAIL") &&
+ * strcmp(hier, "NO_PARENT_DIRECT") &&
+ * strcmp(hier, "SINGLE_PARENT") &&
+ * strcmp(hier, "PASSTHROUGH_PARENT") &&
+ * strcmp(hier, "SSL_PARENT_MISS") &&
+ * strcmp(hier, "DEFAULT_PARENT");
+ */
memcpy(entry->key, storeKeyPublic(url, method_id), sizeof(entry->key));
/*fprintf(stdout, "%s:%d: %s %s %s %s\n",
- fname, count, method, storeKeyText(entry->key), url, hier); */
+ * fname, count, method, storeKeyText(entry->key), url, hier); */
return frOk;
}
static void
-cachePurge(Cache *cache, storeSwapLogData *s, int update_digest)
+cachePurge(Cache * cache, storeSwapLogData * s, int update_digest)
{
CacheEntry *olde = (CacheEntry *) hash_lookup(cache->hash, s->key);
if (!olde) {
}
static void
-cacheStore(Cache *cache, storeSwapLogData *s, int update_digest)
+cacheStore(Cache * cache, storeSwapLogData * s, int update_digest)
{
CacheEntry *olde = (CacheEntry *) hash_lookup(cache->hash, s->key);
if (olde) {
}
static void
-cacheUpdateStore(Cache *cache, storeSwapLogData *s, int update_digest)
+cacheUpdateStore(Cache * cache, storeSwapLogData * s, int update_digest)
{
switch (s->op) {
- case SWAP_LOG_ADD:
- cacheStore(cache, s, update_digest);
- break;
- case SWAP_LOG_DEL:
- cachePurge(cache, s, update_digest);
- break;
- default:
- assert(0);
+ case SWAP_LOG_ADD:
+ cacheStore(cache, s, update_digest);
+ break;
+ case SWAP_LOG_DEL:
+ cachePurge(cache, s, update_digest);
+ break;
+ default:
+ assert(0);
}
}
main(int argc, char *argv[])
{
FileIterator **fis = NULL;
- const int fi_count = argc-1;
+ const int fi_count = argc - 1;
int active_fi_count = 0;
time_t ready_time;
Cache *them, *us;
/* init iterators with files */
fis[0] = fileIteratorCreate(argv[1], accessLogReader);
for (i = 2; i < argc; ++i)
- fis[i-1] = fileIteratorCreate(argv[i], swapStateReader);
+ fis[i - 1] = fileIteratorCreate(argv[i], swapStateReader);
/* check that all files were found */
for (i = 0; i < fi_count; ++i)
- if (!fis[i]) return -2;
+ if (!fis[i])
+ return -2;
/* read prefix to get start-up contents of the peer cache */
ready_time = -1;
for (i = 1; i < fi_count; ++i) {
FileIterator *fi = fis[i];
while (fi->inner_time > 0) {
- if (((storeSwapLogData*)fi->entry)->op == SWAP_LOG_DEL) {
+ if (((storeSwapLogData *) fi->entry)->op == SWAP_LOG_DEL) {
cachePurge(them, fi->entry, 0);
if (ready_time < 0)
- ready_time = fi->inner_time;
+ ready_time = fi->inner_time;
} else {
if (ready_time > 0 && fi->inner_time > ready_time)
break;
}
/* digest peer cache content */
cacheResetDigest(them);
- us->digest = cacheDigestClone(them->digest); /* @netw@ */
+ us->digest = cacheDigestClone(them->digest); /* @netw@ */
/* shift the time in access log to match ready_time */
fileIteratorSetCurTime(fis[0], ready_time);
/* iterate, use the iterator with the smallest positive inner_time */
cur_time = -1;
do {
- int next_i = -1;
- time_t next_time = -1;
+ int next_i = -1;
+ time_t next_time = -1;
active_fi_count = 0;
for (i = 0; i < fi_count; ++i) {
if (fis[i]->inner_time >= 0) {
- if (!active_fi_count || fis[i]->inner_time < next_time) {
+ if (!active_fi_count || fis[i]->inner_time < next_time) {
next_i = i;
next_time = fis[i]->inner_time;
}
}
if (next_i >= 0) {
cur_time = next_time;
- /*fprintf(stderr, "%2d time: %d %s", next_i, (int)cur_time, ctime(&cur_time));*/
+ /*fprintf(stderr, "%2d time: %d %s", next_i, (int)cur_time, ctime(&cur_time)); */
if (next_i == 0)
cacheFetch(us, fis[next_i]->entry);
else
cacheQueryReport(us, &us->qstats);
/* clean */
- for (i = 0; i < argc-1; ++i) {
+ for (i = 0; i < argc - 1; ++i) {
fileIteratorDestroy(fis[i]);
}
xfree(fis);