/*
- * $Id: CacheDigest.cc,v 1.11 1998/04/07 23:23:36 rousskov Exp $
+ * $Id: CacheDigest.cc,v 1.12 1998/04/14 15:16:23 rousskov Exp $
*
* DEBUG: section 70 Cache Digest
* AUTHOR: Alex Rousskov
/* local functions */
static void cacheDigestHashKey(const CacheDigest *cd, const cache_key *key);
+static size_t cacheDigestCalcMaskSize(int cap);
/* configuration params */
static const int BitsPerEntry = 4;
CacheDigest *
cacheDigestCreate(int capacity)
{
- const size_t mask_size = (size_t) (capacity * BitsPerEntry + 7) / 8;
+ const size_t mask_size = cacheDigestCalcMaskSize(capacity);
CacheDigest *cd = cacheDigestSizedCreate(mask_size, capacity);
return cd;
}
memset(cd->mask, 0, cd->mask_size);
}
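+
+/* grows or shrinks the digest to hold new_cap entries; the mask is cleared */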
+void
+cacheDigestChangeCap(CacheDigest * cd, int new_cap)
+{
+ assert(cd);
+ cd->capacity = new_cap;
+ cd->mask_size = cacheDigestCalcMaskSize(new_cap);
+ cd->mask = xrealloc(cd->mask, cd->mask_size);
+ /* have to clear because capacity changes hash functions;
+ * clearing after the realloc also zeroes any newly grown bytes */
+ cacheDigestClear(cd);
+}
+
/* returns true if the key belongs to the digest */
int
cacheDigestTest(const CacheDigest * cd, const cache_key * key)
);
}
+static size_t
+cacheDigestCalcMaskSize(int cap)
+{
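+ /* round the bit count up to whole bytes; e.g. cap = 101 -> 404 bits -> 51 bytes */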
+ return (size_t) (cap * BitsPerEntry + 7) / 8;
+}
+
static void
cacheDigestHashKey(const CacheDigest *cd, const cache_key *key)
{
/*
- * $Id: client_side.cc,v 1.276 1998/04/12 06:13:56 rousskov Exp $
+ * $Id: client_side.cc,v 1.277 1998/04/14 15:16:24 rousskov Exp $
*
* DEBUG: section 33 Client-side Routines
* AUTHOR: Duane Wessels
if (http->internal && strStr(http->request->urlpath, StoreDigestUrlPath)) {
kb_incr(&Counter.cd.kbytes_sent, http->out.size);
Counter.cd.msgs_sent++;
+ debug(33, 1) ("Client %s requested local cache digest (%d bytes)\n",
+ inet_ntoa(http->request->client_addr), http->out.size);
}
/* @?@ split this ugly if-monster */
if (/* we used ICP or CD for peer selection */
EBIT_TEST(http->request->flags, REQ_CACHABLE) &&
/* paranoid: we have a reply pointer */
(reply = storeEntryReply(http->entry))) {
+
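+ /* compare the digest's guess with the real outcome reported via X-Cache */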
const char *x_cache_fld = httpHeaderGetLastStr(&reply->header, HDR_X_CACHE);
const int real_hit = x_cache_fld && !strncmp(x_cache_fld, "HIT", 3);
const int guess_hit = LOOKUP_HIT == H->cd_lookup;
cacheDigestGuessStatsUpdate(&peer->digest.stats.guess,
real_hit, guess_hit);
} else {
- /* temporary paranoid debug */
+ /* temporary paranoid debug @?@ */
static int max_count = 200;
if (max_count > 0) {
debug(33,1) ("clientUpdateCounters: lost peer %s for %s! (%d)\n",
/*
- * $Id: peer_digest.cc,v 1.13 1998/04/12 06:10:06 rousskov Exp $
+ * $Id: peer_digest.cc,v 1.14 1998/04/14 15:16:25 rousskov Exp $
*
* DEBUG: section 72 Peer Digest Routines
* AUTHOR: Alex Rousskov
#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)
/* min interval for requesting digests from the same peer */
-static const time_t PeerDigestRequestMinGap = 10 * 60; /* seconds */
+static const time_t PeerDigestRequestMinGap = 15 * 60; /* seconds */
/* min interval for requesting digests at start */
static const time_t GlobalDigestRequestMinGap = 1 * 60; /* seconds */
/* release buggy entry */
storeReleaseRequest(fetch->entry);
} else {
- if (fetch->entry->store_status == STORE_OK) {
+ /* ugly condition, but how else to detect an unchanged digest? @?@ @?@ */
+ if (fetch->entry->mem_obj->reply->sline.status == HTTP_NOT_MODIFIED) {
debug(72, 2) ("re-used old digest from %s\n", peer->host);
} else {
debug(72, 2) ("received valid digest from %s\n", peer->host);
peerDigestDelay(peer, 0,
max_delay(peerDigestExpiresDelay(peer, fetch->entry), 0));
}
- /* note: outgoing numbers are not precise! @?@ */
/* update global stats */
+ /* note: outgoing numbers are not precise! @?@ */
kb_incr(&Counter.cd.kbytes_sent, req->headers_sz);
kb_incr(&Counter.cd.kbytes_recv, (size_t)b_read);
Counter.cd.msgs_sent++;
extern void cacheDigestDestroy(CacheDigest * cd);
extern CacheDigest *cacheDigestClone(const CacheDigest * cd);
extern void cacheDigestClear(CacheDigest * cd);
+extern void cacheDigestChangeCap(CacheDigest * cd, int new_cap);
extern int cacheDigestTest(const CacheDigest * cd, const cache_key * key);
extern void cacheDigestAdd(CacheDigest * cd, const cache_key * key);
extern void cacheDigestDel(CacheDigest * cd, const cache_key * key);
/*
- * $Id: store_digest.cc,v 1.6 1998/04/08 22:52:38 rousskov Exp $
+ * $Id: store_digest.cc,v 1.7 1998/04/14 15:16:26 rousskov Exp $
*
* DEBUG: section 71 Store Digest Manager
* AUTHOR: Alex Rousskov
static const int StoreDigestSwapOutChunkSize = SM_PAGE_SIZE;
/* portion (0,1] of a hash table to be rescanned at a time */
static const double StoreDigestRebuildChunkPercent = 0.10;
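+ /* e.g. with 0.10 each rebuild step scans 10% of the table, so a full rebuild takes 10 steps */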
+/* Fudge Factor for sizing the digest */
+static const double StoreDigestFudgeFactor = 1.5;
/* local vars */
static StoreDigestState sd_state;
static void storeDigestRewriteFinish(StoreEntry * e);
static void storeDigestSwapOutStep(StoreEntry * e);
static void storeDigestCBlockSwapOut(StoreEntry * e);
+static int storeDigestCalcCap();
+static int storeDigestResize();
void
storeDigestInit()
{
- /*
- * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
- * the bits are off). However, we do not have a formula to calculate the
- * number of _entries_ we want to pre-allocate for.
- * Use 1.5*max#entries because 2*max#entries gives about 40% utilization.
- */
#if SQUID_MAINTAIN_CACHE_DIGEST
- const int cap = (int) (1.5 * Config.Swap.maxSize / Config.Store.avgObjectSize);
+ const int cap = storeDigestCalcCap();
store_digest = cacheDigestCreate(cap);
- debug(71, 1) ("Using %d byte cache digest; rebuild/rewrite every %d/%d sec\n",
- store_digest->mask_size, StoreDigestRebuildPeriod, StoreDigestRewritePeriod);
+ debug(71, 1) ("Using %d byte cache digest (%d entries); rebuild/rewrite every %d/%d sec\n",
+ store_digest->mask_size, store_digest->capacity,
+ StoreDigestRebuildPeriod, StoreDigestRewritePeriod);
#else
store_digest = NULL;
debug(71, 1) ("Local cache digest is 'off'\n");
}
sd_state.rebuild_lock = 1;
sd_state.rebuild_offset = 0;
- /* not clean()! */
- cacheDigestClear(store_digest);
+ /* resize the digest if needed (resizing also clears it); otherwise just clear it */
+ if (!storeDigestResize())
+ cacheDigestClear(store_digest); /* not clean()! */
debug(71, 2) ("storeDigestRebuild: start rebuild #%d\n", sd_state.rebuild_count + 1);
storeDigestRebuildStep(NULL);
}
storeAppend(e, (char*) &sd_state.cblock, sizeof(sd_state.cblock));
}
+/* calculates digest capacity */
+static int
+storeDigestCalcCap()
+{
+ /*
+ * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
+ * the bits are off). However, we do not have a formula to calculate the
+ * number of _entries_ we want to pre-allocate for.
+ */
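+ /*
+ * A rough sketch of the math, assuming the digest sets 4 bits per added
+ * entry (one per hash function): with n entries in m = BitsPerEntry * cap
+ * bits, expected utilization is 1 - exp(-4n/m). So cap = 2n gives ~39%
+ * utilization and cap = 1.5n gives ~49%, hence the 1.5 fudge factor below.
+ */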
+ const int hi_cap = store_table->size;
+ const int lo_cap = hi_cap/5;
+ int cap = memInUse(MEM_STOREENTRY); /* current entry count; the fudge factor is applied below */
+ if (cap < lo_cap)
+ cap = lo_cap;
+ if (cap > hi_cap)
+ cap = hi_cap;
+ cap = 1 + (int) (StoreDigestFudgeFactor * cap);
+ return cap;
+}
+
+/* returns true if we actually resized the digest */
+static int
+storeDigestResize()
+{
+ const int cap = storeDigestCalcCap();
+ int diff;
+ assert(store_digest);
+ diff = abs(cap - store_digest->capacity);
+ debug(71, 2) ("storeDigestResize: %d -> %d; change: %d (%d%%)\n",
+ store_digest->capacity, cap, diff,
+ xpercentInt(diff, store_digest->capacity));
+ /* avoid minor adjustments */
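+ /* e.g. 10000 -> 10900 entries (9%) is ignored; 10000 -> 12000 (20%) triggers a resize */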
+ if (diff <= store_digest->capacity/10) {
+ debug(71, 1) ("storeDigestResize: small change, will not resize.\n");
+ return 0; /* at most 10% change */
+ } else {
+ debug(71, 1) ("storeDigestResize: big change, resizing.\n");
+ cacheDigestChangeCap(store_digest, cap);
+ return 1;
+ }
+}
+
void
storeDigestReport(StoreEntry * e)
{