static uint32_t hashed_keys[4];
void
-CacheDigest::init(int newCapacity)
+CacheDigest::init(uint64_t newCapacity)
{
const auto newMaskSz = CacheDigest::CalcMaskSize(newCapacity, bits_per_entry);
assert(newCapacity > 0 && bits_per_entry > 0);
- assert(newMaskSz > 0);
+ assert(newMaskSz != 0);
capacity = newCapacity;
mask_size = newMaskSz;
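+ // xcalloc() zero-fills the allocation, so the fresh digest starts with all bits off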
mask = static_cast<char *>(xcalloc(mask_size,1));
<< mask_size << " bytes");
}
-CacheDigest::CacheDigest(int aCapacity, int bpe) :
+CacheDigest::CacheDigest(uint64_t aCapacity, uint8_t bpe) :
+ count(0),
+ del_count(0),
+ capacity(0),
mask(nullptr),
mask_size(0),
- capacity(0),
- bits_per_entry(bpe),
- count(0),
- del_count(0)
+ bits_per_entry(bpe)
{
assert(SQUID_MD5_DIGEST_LENGTH == 16); /* our hash functions rely on 16 byte keys */
updateCapacity(aCapacity);
}
void
-CacheDigest::updateCapacity(int newCapacity)
+CacheDigest::updateCapacity(uint64_t newCapacity)
{
safe_free(mask);
init(newCapacity); // will re-init mask and mask_size
storeAppendPrintf(e, "%s digest: size: %d bytes\n",
label ? label : "", stats.bit_count / 8
);
- storeAppendPrintf(e, "\t entries: count: %d capacity: %d util: %d%%\n",
+ storeAppendPrintf(e, "\t entries: count: %" PRIu64 " capacity: %" PRIu64 " util: %d%%\n",
cd->count,
cd->capacity,
xpercentInt(cd->count, cd->capacity)
);
- storeAppendPrintf(e, "\t deletion attempts: %d\n",
+ storeAppendPrintf(e, "\t deletion attempts: %" PRIu64 "\n",
cd->del_count
);
storeAppendPrintf(e, "\t bits: per entry: %d on: %d capacity: %d util: %d%%\n",
);
}
-size_t
-CacheDigest::CalcMaskSize(int cap, int bpe)
+uint32_t
+CacheDigest::CalcMaskSize(uint64_t cap, uint8_t bpe)
{
- // XXX: might 32-bit overflow during multiply
- return (size_t) (cap * bpe + 7) / 8;
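+ // cap entries at bpe bits each, rounded up to whole bytes;
+ // e.g. 1,000,000 entries at 5 bits/entry need (5,000,000 + 7) / 8 = 625,000 bytes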
+ const uint64_t bitCount = (cap * bpe) + 7;
+ assert(bitCount < INT_MAX); // do not overflow the 31-bit positive int range later
+ return static_cast<uint32_t>(bitCount / 8);
}
static void
cacheDigestHashKey(const CacheDigest * cd, const cache_key * key)
{
- const unsigned int bit_count = cd->mask_size * 8;
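+ // CalcMaskSize() keeps the total bit count below INT_MAX, so uint32_t is wide enough here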
+ const uint32_t bit_count = cd->mask_size * 8;
unsigned int tmp_keys[4];
/* we must memcpy to ensure alignment */
memcpy(tmp_keys, key, sizeof(tmp_keys));
{
MEMPROXY_CLASS(CacheDigest);
public:
- CacheDigest(int capacity, int bpe);
+ CacheDigest(uint64_t capacity, uint8_t bpe);
~CacheDigest();
// NP: only used by broken unit-test
void clear();
/// changes mask size to fit newCapacity, resets bits to 0
- void updateCapacity(int newCapacity);
+ void updateCapacity(uint64_t newCapacity);
void add(const cache_key * key);
void remove(const cache_key * key);
/// calculate the size of mask required to digest up to
/// a specified capacity and bitsize.
- static size_t CalcMaskSize(int cap, int bpe);
+ static uint32_t CalcMaskSize(uint64_t cap, uint8_t bpe);
private:
- void init(int newCapacity);
+ void init(uint64_t newCapacity);
public:
/* public, read-only */
- char *mask; /* bit mask */
- int mask_size; /* mask size in bytes */
- int capacity; /* expected maximum for .count, not a hard limit */
- int bits_per_entry; /* number of bits allocated for each entry from capacity */
- int count; /* number of digested entries */
- int del_count; /* number of deletions performed so far */
+ uint64_t count; /* number of digested entries */
+ uint64_t del_count; /* number of deletions performed so far */
+ uint64_t capacity; /* expected maximum for .count, not a hard limit */
+ char *mask; /* bit mask */
+ uint32_t mask_size; /* mask size in bytes */
+ uint8_t bits_per_entry; /* number of bits allocated for each entry from capacity */
};
void cacheDigestGuessStatsUpdate(CacheDigestGuessStats * stats, int real_hit, int guess_hit);
static void storeDigestRewriteFinish(StoreEntry * e);
static EVH storeDigestSwapOutStep;
static void storeDigestCBlockSwapOut(StoreEntry * e);
-static int storeDigestCalcCap(void);
-static int storeDigestResize(void);
static void storeDigestAdd(const StoreEntry *);
-#endif /* USE_CACHE_DIGESTS */
-
static void
storeDigestRegisterWithCacheManager(void)
{
Mgr::RegisterAction("store_digest", "Store Digest", storeDigestReport, 0, 1);
}
-/*
- * PUBLIC FUNCTIONS
- */
+/// calculates digest capacity
+static uint64_t
+storeDigestCalcCap()
+{
+ /*
+ * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
+ * the bits are off). However, we do not have a formula to calculate the
+ * number of _entries_ we want to pre-allocate for.
+ */
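+ // hi_cap: how many average-sized objects a full cache could hold;
+ // lo_cap: how many the current cache contents already imply (at least one)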
+ const uint64_t hi_cap = Store::Root().maxSize() / Config.Store.avgObjectSize;
+ const uint64_t lo_cap = 1 + Store::Root().currentSize() / Config.Store.avgObjectSize;
+ const uint64_t e_count = StoreEntry::inUseCount();
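+ // prefer the number of entries currently in use; if the store is empty
+ // (e.g. at startup), fall back to the average-size-based maximum estimate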
+ uint64_t cap = e_count ? e_count : hi_cap;
+ debugs(71, 2, "have: " << e_count << ", want " << cap <<
+ " entries; limits: [" << lo_cap << ", " << hi_cap << "]");
+
+ if (cap < lo_cap)
+ cap = lo_cap;
+
+ /* do not enforce hi_cap limit, average-based estimation may be wrong
+ *if (cap > hi_cap)
+ * cap = hi_cap;
+ */
+
+ // Bug 4534: we still have to set an upper limit at some reasonable value.
+ // This matches CacheDigest::CalcMaskSize(), which asserts (cap * bpe) + 7 < INT_MAX,
+ // so cap must stay below (INT_MAX - 7) / bpe; (INT_MAX - 8) keeps a safe margin.
+ const uint64_t absolute_max = (INT_MAX - 8) / Config.digest.bits_per_entry;
+ if (cap > absolute_max) {
+ debugs(71, DBG_CRITICAL, "WARNING: Cache Digest cannot store " << cap << " entries. Limiting to " << absolute_max);
+ cap = absolute_max;
+ }
+
+ return cap;
+}
+#endif /* USE_CACHE_DIGESTS */
void
storeDigestInit(void)
storeDigestRegisterWithCacheManager();
#if USE_CACHE_DIGESTS
- const int cap = storeDigestCalcCap();
-
if (!Config.onoff.digest_generation) {
store_digest = NULL;
debugs(71, 3, "Local cache digest generation disabled");
return;
}
+ const uint64_t cap = storeDigestCalcCap();
store_digest = new CacheDigest(cap, Config.digest.bits_per_entry);
debugs(71, DBG_IMPORTANT, "Local cache digest enabled; rebuild/rewrite every " <<
(int) Config.digest.rebuild_period << "/" <<
storeDigestRebuildResume();
}
+/// \returns true if we actually resized the digest
+static bool
+storeDigestResize()
+{
+ const uint64_t cap = storeDigestCalcCap();
+ assert(store_digest);
+ // compute the absolute difference without abs(): both values are unsigned 64-bit
+ const uint64_t diff = (cap > store_digest->capacity) ? (cap - store_digest->capacity) : (store_digest->capacity - cap);
+ debugs(71, 2, store_digest->capacity << " -> " << cap << "; change: " <<
+ diff << " (" << xpercentInt(diff, store_digest->capacity) << "%)" );
+ /* avoid minor adjustments: only resize when capacity changes by more than 10% */
+
+ if (diff <= store_digest->capacity / 10) {
+ debugs(71, 2, "small change, will not resize.");
+ return false;
+ } else {
+ debugs(71, 2, "big change, resizing.");
+ store_digest->updateCapacity(cap);
+ }
+ return true;
+}
+
-/* called be Rewrite to push Rebuild forward */
+/* called by Rewrite to push Rebuild forward */
static void
storeDigestRebuildResume(void)
assert(e);
/* _add_ check that nothing bad happened while we were waiting @?@ @?@ */
- if (sd_state.rewrite_offset + chunk_size > store_digest->mask_size)
+ if (static_cast<uint32_t>(sd_state.rewrite_offset + chunk_size) > store_digest->mask_size)
chunk_size = store_digest->mask_size - sd_state.rewrite_offset;
e->append(store_digest->mask + sd_state.rewrite_offset, chunk_size);
sd_state.rewrite_offset += chunk_size;
/* are we done ? */
- if (sd_state.rewrite_offset >= store_digest->mask_size)
+ if (static_cast<uint32_t>(sd_state.rewrite_offset) >= store_digest->mask_size)
storeDigestRewriteFinish(e);
else
eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, data, 0.0, 1, false);
sd_state.cblock.count = htonl(store_digest->count);
sd_state.cblock.del_count = htonl(store_digest->del_count);
sd_state.cblock.mask_size = htonl(store_digest->mask_size);
- sd_state.cblock.bits_per_entry = (unsigned char)
- Config.digest.bits_per_entry;
+ sd_state.cblock.bits_per_entry = Config.digest.bits_per_entry;
sd_state.cblock.hash_func_count = (unsigned char) CacheDigestHashFuncCount;
e->append((char *) &sd_state.cblock, sizeof(sd_state.cblock));
}
-/* calculates digest capacity */
-static int
-storeDigestCalcCap(void)
-{
- /*
- * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
- * the bits are off). However, we do not have a formula to calculate the
- * number of _entries_ we want to pre-allocate for.
- */
- const int hi_cap = Store::Root().maxSize() / Config.Store.avgObjectSize;
- const int lo_cap = 1 + Store::Root().currentSize() / Config.Store.avgObjectSize;
- const int e_count = StoreEntry::inUseCount();
- int cap = e_count ? e_count :hi_cap;
- debugs(71, 2, "storeDigestCalcCap: have: " << e_count << ", want " << cap <<
- " entries; limits: [" << lo_cap << ", " << hi_cap << "]");
-
- if (cap < lo_cap)
- cap = lo_cap;
-
- /* do not enforce hi_cap limit, average-based estimation may be wrong
- *if (cap > hi_cap)
- * cap = hi_cap;
- */
- return cap;
-}
-
-/* returns true if we actually resized the digest */
-static int
-storeDigestResize(void)
-{
- const int cap = storeDigestCalcCap();
- int diff;
- assert(store_digest);
- diff = abs(cap - store_digest->capacity);
- debugs(71, 2, "storeDigestResize: " <<
- store_digest->capacity << " -> " << cap << "; change: " <<
- diff << " (" << xpercentInt(diff, store_digest->capacity) << "%)" );
- /* avoid minor adjustments */
-
- if (diff <= store_digest->capacity / 10) {
- debugs(71, 2, "storeDigestResize: small change, will not resize.");
- return 0;
- } else {
- debugs(71, 2, "storeDigestResize: big change, resizing.");
- store_digest->updateCapacity(cap);
- return 1;
- }
-}
-
#endif /* USE_CACHE_DIGESTS */