]>
Commit | Line | Data |
---|---|---|
8638fc66 | 1 | /* |
7e3ce7b9 | 2 | * $Id: store_digest.cc,v 1.36 1999/12/30 17:36:55 wessels Exp $ |
8638fc66 | 3 | * |
4 | * DEBUG: section 71 Store Digest Manager | |
5 | * AUTHOR: Alex Rousskov | |
6 | * | |
7 | * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ | |
e25c139f | 8 | * ---------------------------------------------------------- |
8638fc66 | 9 | * |
10 | * Squid is the result of efforts by numerous individuals from the | |
11 | * Internet community. Development is led by Duane Wessels of the | |
e25c139f | 12 | * National Laboratory for Applied Network Research and funded by the |
13 | * National Science Foundation. Squid is Copyrighted (C) 1998 by | |
14 | * Duane Wessels and the University of California San Diego. Please | |
15 | * see the COPYRIGHT file for full details. Squid incorporates | |
16 | * software developed and/or copyrighted by other sources. Please see | |
17 | * the CREDITS file for full details. | |
8638fc66 | 18 | * |
19 | * This program is free software; you can redistribute it and/or modify | |
20 | * it under the terms of the GNU General Public License as published by | |
21 | * the Free Software Foundation; either version 2 of the License, or | |
22 | * (at your option) any later version. | |
23 | * | |
24 | * This program is distributed in the hope that it will be useful, | |
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
27 | * GNU General Public License for more details. | |
28 | * | |
29 | * You should have received a copy of the GNU General Public License | |
30 | * along with this program; if not, write to the Free Software | |
cbdec147 | 31 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. |
e25c139f | 32 | * |
8638fc66 | 33 | */ |
34 | ||
6168bccd | 35 | |
36 | /* | |
37 | * TODO: We probably do not track all the cases when | |
38 | * storeDigestNoteStoreReady() must be called; this may prevent | |
39 | * storeDigestRebuild/write schedule to be activated | |
40 | */ | |
41 | ||
8638fc66 | 42 | #include "squid.h" |
43 | ||
d6fd3381 | 44 | #if USE_CACHE_DIGESTS |
45 | ||
46 | /* | |
47 | * local types | |
48 | */ | |
12784378 | 49 | |
/*
 * Shared state for the digest Rebuild (re-scan of the store hash) and
 * Rewrite (swap-out of the digest object) background processes.
 * Only one instance exists (sd_state below).
 */
typedef struct {
    StoreDigestCBlock cblock;	/* network-format header prepended to the digest mask */
    int rebuild_lock;		/* non-zero while a rebuild is in progress (used as a boolean; the old "bucket number" comment appears stale -- see rebuild_offset) */
    StoreEntry *rewrite_lock;	/* store entry with the digest; non-NULL doubles as the rewrite-in-progress flag */
    int rebuild_offset;		/* next store hash bucket to scan */
    int rewrite_offset;		/* bytes of the digest mask swapped out so far */
    int rebuild_count;		/* completed rebuilds since startup */
    int rewrite_count;		/* completed rewrites since startup */
} StoreDigestState;
59 | ||
/* per-rebuild counters; zeroed at the start of every rebuild cycle */
typedef struct {
    int del_count;		/* #store entries deleted from store_digest */
    int del_lost_count;		/* #store entries not found in store_digest on delete */
    int add_count;		/* #store entries accepted to store_digest */
    int add_coll_count;		/* #accepted entries that collided with existing ones */
    int rej_count;		/* #store entries not accepted to store_digest */
    int rej_coll_count;		/* #not accepted entries that collided with existing ones */
} StoreDigestStats;
68 | ||
/* local vars */
static StoreDigestState sd_state;	/* the single rebuild/rewrite state machine */
static StoreDigestStats sd_stats;	/* counters for the current digest generation */

/* local prototypes */
static void storeDigestRebuildStart(void *datanotused);
static void storeDigestRebuildResume(void);
static void storeDigestRebuildFinish(void);
static void storeDigestRebuildStep(void *datanotused);
static void storeDigestRewriteStart(void *);
static void storeDigestRewriteResume(void);
static void storeDigestRewriteFinish(StoreEntry * e);
static EVH storeDigestSwapOutStep;	/* event handler: swaps out one digest chunk per call */
static void storeDigestCBlockSwapOut(StoreEntry * e);
static int storeDigestCalcCap(void);
static int storeDigestResize(void);
static void storeDigestAdd(const StoreEntry *);
12784378 | 86 | |
d6fd3381 | 87 | #endif /* USE_CACHE_DIGESTS */ |
88 | ||
89 | /* | |
90 | * PUBLIC FUNCTIONS | |
91 | */ | |
12784378 | 92 | |
/*
 * Creates the local cache digest (when digest generation is enabled in
 * the configuration) and registers the "store_digest" cache manager page.
 * When digests are compiled out or disabled, store_digest stays NULL and
 * the rest of this module is inert.
 */
void
storeDigestInit(void)
{
#if USE_CACHE_DIGESTS
    const int cap = storeDigestCalcCap();

    if (!Config.onoff.digest_generation) {
	store_digest = NULL;
	debug(71, 3) ("Local cache digest generation disabled\n");
	return;
    }
    store_digest = cacheDigestCreate(cap, Config.digest.bits_per_entry);
    debug(71, 1) ("Local cache digest enabled; rebuild/rewrite every %d/%d sec\n",
	Config.digest.rebuild_period, Config.digest.rewrite_period);
    /* start from a clean state machine; schedules begin in storeDigestNoteStoreReady() */
    memset(&sd_state, 0, sizeof(sd_state));
    cachemgrRegister("store_digest", "Store Digest",
	storeDigestReport, 0, 1);
#else
    store_digest = NULL;
    debug(71, 3) ("Local cache digest is 'off'\n");
#endif
}
115 | ||
/*
 * Called when store_rebuild completes; kicks off the first digest
 * rebuild and the first digest rewrite cycle.
 */
void
storeDigestNoteStoreReady(void)
{
#if USE_CACHE_DIGESTS
    if (!Config.onoff.digest_generation)
	return;
    storeDigestRebuildStart(NULL);
    storeDigestRewriteStart(NULL);
#endif
}
127 | ||
128 | void | |
129 | storeDigestDel(const StoreEntry * entry) | |
130 | { | |
131 | #if USE_CACHE_DIGESTS | |
7e3ce7b9 | 132 | if (!Config.onoff.digest_generation) { |
133 | return; | |
134 | } | |
d6fd3381 | 135 | assert(entry && store_digest); |
136 | debug(71, 6) ("storeDigestDel: checking entry, key: %s\n", | |
137 | storeKeyText(entry->key)); | |
d46a87a8 | 138 | if (!EBIT_TEST(entry->flags, KEY_PRIVATE)) { |
d6fd3381 | 139 | if (!cacheDigestTest(store_digest, entry->key)) { |
140 | sd_stats.del_lost_count++; | |
141 | debug(71, 6) ("storeDigestDel: lost entry, key: %s url: %s\n", | |
142 | storeKeyText(entry->key), storeUrl(entry)); | |
143 | } else { | |
144 | sd_stats.del_count++; | |
145 | cacheDigestDel(store_digest, entry->key); | |
146 | debug(71, 6) ("storeDigestDel: deled entry, key: %s\n", | |
147 | storeKeyText(entry->key)); | |
148 | } | |
149 | } | |
150 | #endif | |
12784378 | 151 | } |
152 | ||
12784378 | 153 | void |
d6fd3381 | 154 | storeDigestReport(StoreEntry * e) |
155 | { | |
156 | #if USE_CACHE_DIGESTS | |
7e3ce7b9 | 157 | if (!Config.onoff.digest_generation) { |
158 | return; | |
159 | } | |
d6fd3381 | 160 | if (store_digest) { |
161 | cacheDigestReport(store_digest, "store", e); | |
162 | storeAppendPrintf(e, "\t added: %d rejected: %d ( %.2f %%) del-ed: %d\n", | |
163 | sd_stats.add_count, | |
164 | sd_stats.rej_count, | |
165 | xpercent(sd_stats.rej_count, sd_stats.rej_count + sd_stats.add_count), | |
166 | sd_stats.del_count); | |
167 | storeAppendPrintf(e, "\t collisions: on add: %.2f %% on rej: %.2f %%\n", | |
168 | xpercent(sd_stats.add_coll_count, sd_stats.add_count), | |
169 | xpercent(sd_stats.rej_coll_count, sd_stats.rej_count)); | |
170 | } else { | |
171 | storeAppendPrintf(e, "store digest: disabled.\n"); | |
172 | } | |
173 | #endif | |
174 | } | |
175 | ||
176 | /* | |
177 | * LOCAL FUNCTIONS | |
178 | */ | |
179 | ||
180 | #if USE_CACHE_DIGESTS | |
181 | ||
c68e9c6b | 182 | /* should we digest this entry? used by storeDigestAdd() */ |
183 | static int | |
184 | storeDigestAddable(const StoreEntry * e) | |
185 | { | |
186 | /* add some stats! XXX */ | |
187 | ||
188 | debug(71, 6) ("storeDigestAddable: checking entry, key: %s\n", | |
189 | storeKeyText(e->key)); | |
190 | ||
191 | /* check various entry flags (mimics storeCheckCachable XXX) */ | |
192 | if (!EBIT_TEST(e->flags, ENTRY_CACHABLE)) { | |
193 | debug(71, 6) ("storeDigestAddable: NO: not cachable\n"); | |
194 | return 0; | |
195 | } | |
196 | if (EBIT_TEST(e->flags, KEY_PRIVATE)) { | |
197 | debug(71, 6) ("storeDigestAddable: NO: private key\n"); | |
198 | return 0; | |
199 | } | |
200 | if (EBIT_TEST(e->flags, ENTRY_NEGCACHED)) { | |
201 | debug(71, 6) ("storeDigestAddable: NO: negative cached\n"); | |
202 | return 0; | |
203 | } | |
204 | if (EBIT_TEST(e->flags, RELEASE_REQUEST)) { | |
205 | debug(71, 6) ("storeDigestAddable: NO: release requested\n"); | |
206 | return 0; | |
207 | } | |
208 | if (e->store_status == STORE_OK && EBIT_TEST(e->flags, ENTRY_BAD_LENGTH)) { | |
209 | debug(71, 6) ("storeDigestAddable: NO: wrong content-length\n"); | |
210 | return 0; | |
211 | } | |
212 | /* do not digest huge objects */ | |
213 | if (e->swap_file_sz > Config.Store.maxObjectSize) { | |
214 | debug(71, 6) ("storeDigestAddable: NO: too big\n"); | |
215 | return 0; | |
216 | } | |
217 | /* still here? check staleness */ | |
218 | /* Note: We should use the time of the next rebuild, not (cur_time+period) */ | |
7e3ce7b9 | 219 | if (refreshCheckDigest(e, Config.digest.rebuild_period)) { |
c68e9c6b | 220 | debug(71, 6) ("storeDigestAdd: entry expires within %d secs, ignoring\n", |
7e3ce7b9 | 221 | Config.digest.rebuild_period); |
c68e9c6b | 222 | return 0; |
223 | } | |
17a80fc2 | 224 | /* |
225 | * idea: how about also skipping very fresh (thus, potentially | |
226 | * unstable) entries? Should be configurable through | |
227 | * cd_refresh_pattern, of course. | |
228 | */ | |
229 | /* | |
230 | * idea: skip objects that are going to be purged before the next | |
231 | * update. | |
232 | */ | |
233 | #if !HEAP_REPLACEMENT | |
234 | if ((squid_curtime + StoreDigestRebuildPeriod) - e->lastref > storeExpiredReferenceAge()) | |
235 | return 0; | |
236 | #endif | |
c68e9c6b | 237 | return 1; |
238 | } | |
239 | ||
d6fd3381 | 240 | static void |
6168bccd | 241 | storeDigestAdd(const StoreEntry * entry) |
242 | { | |
6168bccd | 243 | assert(entry && store_digest); |
c68e9c6b | 244 | |
245 | if (storeDigestAddable(entry)) { | |
6168bccd | 246 | sd_stats.add_count++; |
247 | if (cacheDigestTest(store_digest, entry->key)) | |
248 | sd_stats.add_coll_count++; | |
249 | cacheDigestAdd(store_digest, entry->key); | |
4b4cd312 | 250 | debug(71, 6) ("storeDigestAdd: added entry, key: %s\n", |
6168bccd | 251 | storeKeyText(entry->key)); |
252 | } else { | |
253 | sd_stats.rej_count++; | |
254 | if (cacheDigestTest(store_digest, entry->key)) | |
255 | sd_stats.rej_coll_count++; | |
256 | } | |
12784378 | 257 | } |
258 | ||
/* rebuilds digest from scratch */
/*
 * Event handler: acquires the rebuild lock and starts a full re-scan of
 * the store hash table.  If a Rewrite is in progress, the rebuild is
 * deferred; storeDigestRewriteFinish() will call storeDigestRebuildResume()
 * once the rewrite completes (the lock stays held while waiting).
 */
static void
storeDigestRebuildStart(void *datanotused)
{
    assert(store_digest);
    /* prevent overlapping if rebuild schedule is too tight */
    if (sd_state.rebuild_lock) {
	debug(71, 1) ("storeDigestRebuildStart: overlap detected, consider increasing rebuild period\n");
	return;
    }
    sd_state.rebuild_lock = 1;
    debug(71, 2) ("storeDigestRebuildStart: rebuild #%d\n", sd_state.rebuild_count + 1);
    if (sd_state.rewrite_lock) {
	debug(71, 2) ("storeDigestRebuildStart: waiting for Rewrite to finish.\n");
	return;
    }
    storeDigestRebuildResume();
}
277 | ||
/* called by Rewrite to push Rebuild forward */
/*
 * Actually begins the rebuild: resets the bucket cursor, resizes the
 * digest to the current capacity estimate (or clears it when no resize
 * is needed), zeroes the statistics, and schedules the first rebuild step.
 * Preconditions: rebuild lock held, no rewrite in progress.
 */
static void
storeDigestRebuildResume(void)
{
    assert(sd_state.rebuild_lock);
    assert(!sd_state.rewrite_lock);
    sd_state.rebuild_offset = 0;
    /* resize or clear */
    if (!storeDigestResize())
	cacheDigestClear(store_digest);	/* not clean()! */
    memset(&sd_stats, 0, sizeof(sd_stats));
    eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}
291 | ||
/* finishes swap out sequence for the digest; schedules next rebuild */
/*
 * Releases the rebuild lock, schedules the next periodic rebuild, and
 * resumes a Rewrite that was waiting for this rebuild to finish (see
 * storeDigestRewriteStart()).
 */
static void
storeDigestRebuildFinish(void)
{
    assert(sd_state.rebuild_lock);
    sd_state.rebuild_lock = 0;
    sd_state.rebuild_count++;
    debug(71, 2) ("storeDigestRebuildFinish: done.\n");
    eventAdd("storeDigestRebuildStart", storeDigestRebuildStart, NULL, (double)
	Config.digest.rebuild_period, 1);
    /* resume pending Rewrite if any */
    if (sd_state.rewrite_lock)
	storeDigestRewriteResume();
}
306 | ||
/* recalculate a few hash buckets per invocation; schedules next step */
/*
 * Processes a configurable percentage of the store hash table per event
 * tick so the rebuild does not monopolize the event loop.  When the
 * bucket cursor reaches the end of the table the rebuild is finished.
 */
static void
storeDigestRebuildStep(void *datanotused)
{
    /* chunk size: rebuild_chunk_percentage of all buckets, rounded up */
    int bcount = (int) ceil((double) store_hash_buckets *
	(double) Config.digest.rebuild_chunk_percentage / 100.0);
    assert(sd_state.rebuild_lock);
    if (sd_state.rebuild_offset + bcount > store_hash_buckets)
	bcount = store_hash_buckets - sd_state.rebuild_offset;
    debug(71, 3) ("storeDigestRebuildStep: buckets: %d offset: %d chunk: %d buckets\n",
	store_hash_buckets, sd_state.rebuild_offset, bcount);
    while (bcount--) {
	hash_link *link_ptr = hash_get_bucket(store_table, sd_state.rebuild_offset);
	for (; link_ptr; link_ptr = link_ptr->next) {
	    /* NOTE(review): cast assumes the hash link is the first member
	     * of StoreEntry -- confirm against the StoreEntry declaration */
	    storeDigestAdd((StoreEntry *) link_ptr);
	}
	sd_state.rebuild_offset++;
    }
    /* are we done ? */
    if (sd_state.rebuild_offset >= store_hash_buckets)
	storeDigestRebuildFinish();
    else
	eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1);
}
331 | ||
332 | ||
/* starts swap out sequence for the digest */
/*
 * Event handler: creates the store entry that will hold the digest
 * (/squid-internal-periodic/<StoreDigestFileName>) and takes the rewrite
 * lock by storing that entry in sd_state.rewrite_lock.  If a Rebuild is
 * in progress the rewrite is deferred; storeDigestRebuildFinish() will
 * call storeDigestRewriteResume() once the rebuild completes.
 */
static void
storeDigestRewriteStart(void *datanotused)
{
    request_flags flags;
    char *url;
    StoreEntry *e;

    assert(store_digest);
    /* prevent overlapping if rewrite schedule is too tight */
    if (sd_state.rewrite_lock) {
	debug(71, 1) ("storeDigestRewrite: overlap detected, consider increasing rewrite period\n");
	return;
    }
    debug(71, 2) ("storeDigestRewrite: start rewrite #%d\n", sd_state.rewrite_count + 1);
    /* make new store entry */
    url = internalLocalUri("/squid-internal-periodic/", StoreDigestFileName);
    flags = null_request_flags;
    flags.cachable = 1;
    sd_state.rewrite_lock = e = storeCreateEntry(url, url, flags, METHOD_GET);
    assert(sd_state.rewrite_lock);
    /* NOTE(review): storeDigestRewriteFinish() claims MEM_DONTFREE was
     * passed here, but this call passes NULL/0 -- confirm which is current */
    cbdataAdd(sd_state.rewrite_lock, NULL, 0);
    debug(71, 3) ("storeDigestRewrite: url: %s key: %s\n", url, storeKeyText(e->key));
    e->mem_obj->request = requestLink(urlParse(METHOD_GET, url));
    /* wait for rebuild (if any) to finish */
    if (sd_state.rebuild_lock) {
	debug(71, 2) ("storeDigestRewriteStart: waiting for rebuild to finish.\n");
	return;
    }
    storeDigestRewriteResume();
}
364 | ||
/*
 * Actually begins the digest swap-out: publishes the entry (purging any
 * previous digest object), fabricates a 200 reply whose body is the
 * cblock header followed by the digest bit mask, writes the reply headers
 * and cblock, and schedules the first mask chunk.
 * Preconditions: rewrite lock held, no rebuild in progress.
 */
static void
storeDigestRewriteResume(void)
{
    StoreEntry *e = sd_state.rewrite_lock;

    assert(sd_state.rewrite_lock);
    assert(!sd_state.rebuild_lock);
    sd_state.rewrite_offset = 0;
    EBIT_SET(e->flags, ENTRY_SPECIAL);
    /* setting public key will purge old digest entry if any */
    storeSetPublicKey(e);
    /* fake reply */
    httpReplyReset(e->mem_obj->reply);
    /* content length = cblock header + digest mask; expires one rewrite period from now */
    httpReplySetHeaders(e->mem_obj->reply, 1.0, 200, "Cache Digest OK",
	"application/cache-digest", store_digest->mask_size + sizeof(sd_state.cblock),
	squid_curtime, squid_curtime + Config.digest.rewrite_period);
    debug(71, 3) ("storeDigestRewrite: entry expires on %d (%+d)\n",
	e->mem_obj->reply->expires, e->mem_obj->reply->expires - squid_curtime);
    storeBuffer(e);
    httpReplySwapOut(e->mem_obj->reply, e);
    storeDigestCBlockSwapOut(e);
    storeBufferFlush(e);
    eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, sd_state.rewrite_lock, 0.0, 1);
}
389 | ||
/* finishes swap out sequence for the digest; schedules next rewrite */
/*
 * Completes the digest store entry, releases the rewrite lock, schedules
 * the next periodic rewrite, and resumes a Rebuild that was waiting for
 * this rewrite to finish (see storeDigestRebuildStart()).
 */
static void
storeDigestRewriteFinish(StoreEntry * e)
{
    assert(e == sd_state.rewrite_lock);
    storeComplete(e);
    storeTimestampsSet(e);
    debug(71, 2) ("storeDigestRewriteFinish: digest expires at %d (%+d)\n",
	e->expires, e->expires - squid_curtime);
    /* is this the write order? @?@ */
    requestUnlink(e->mem_obj->request);
    e->mem_obj->request = NULL;
    storeUnlockObject(e);
    /*
     * note, it won't really get free()'d here because we used
     * MEM_DONTFREE in the call to cbdataAdd().
     * NOTE(review): storeDigestRewriteStart() calls cbdataAdd(..., NULL, 0),
     * so this comment may be stale -- confirm against cbdata semantics.
     */
    cbdataFree(sd_state.rewrite_lock);
    sd_state.rewrite_lock = e = NULL;
    sd_state.rewrite_count++;
    eventAdd("storeDigestRewriteStart", storeDigestRewriteStart, NULL, (double)
	Config.digest.rewrite_period, 1);
    /* resume pending Rebuild if any */
    if (sd_state.rebuild_lock)
	storeDigestRebuildResume();
}
416 | ||
/* swaps out one digest "chunk" per invocation; schedules next swap out */
/*
 * Event handler: appends the next swapout_chunk_size bytes of the digest
 * mask to the digest store entry, clamping the final chunk to the mask
 * boundary.  Finishes the rewrite once the whole mask has been appended.
 */
static void
storeDigestSwapOutStep(void *data)
{
    StoreEntry *e = data;
    int chunk_size = Config.digest.swapout_chunk_size;
    assert(e);
    assert(e == sd_state.rewrite_lock);
    /* _add_ check that nothing bad happened while we were waiting @?@ @?@ */
    if (sd_state.rewrite_offset + chunk_size > store_digest->mask_size)
	chunk_size = store_digest->mask_size - sd_state.rewrite_offset;
    storeAppend(e, store_digest->mask + sd_state.rewrite_offset, chunk_size);
    debug(71, 3) ("storeDigestSwapOutStep: size: %d offset: %d chunk: %d bytes\n",
	store_digest->mask_size, sd_state.rewrite_offset, chunk_size);
    sd_state.rewrite_offset += chunk_size;
    /* are we done ? */
    if (sd_state.rewrite_offset >= store_digest->mask_size)
	storeDigestRewriteFinish(e);
    else
	eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, e, 0.0, 1);
}
438 | ||
/*
 * Builds the network-format digest header (cblock) and appends it to the
 * digest store entry.  Multi-byte fields are converted to network byte
 * order so peers can parse the digest portably.
 */
static void
storeDigestCBlockSwapOut(StoreEntry * e)
{
    memset(&sd_state.cblock, 0, sizeof(sd_state.cblock));
    sd_state.cblock.ver.current = htons(CacheDigestVer.current);
    sd_state.cblock.ver.required = htons(CacheDigestVer.required);
    sd_state.cblock.capacity = htonl(store_digest->capacity);
    sd_state.cblock.count = htonl(store_digest->count);
    sd_state.cblock.del_count = htonl(store_digest->del_count);
    sd_state.cblock.mask_size = htonl(store_digest->mask_size);
    /* single-byte fields need no byte swapping */
    sd_state.cblock.bits_per_entry = (unsigned char)
	Config.digest.bits_per_entry;
    sd_state.cblock.hash_func_count = (unsigned char) CacheDigestHashFuncCount;
    storeAppend(e, (char *) &sd_state.cblock, sizeof(sd_state.cblock));
}
454 | ||
/* calculates digest capacity */
/*
 * Estimates how many entries the digest should be sized for: the current
 * StoreEntry count when available, clamped from below by an estimate
 * derived from the bytes currently on disk.  The high estimate from the
 * configured cache size is reported but deliberately not enforced.
 * NOTE(review): both divisions assume Config.Store.avgObjectSize > 0 --
 * presumably guaranteed by config parsing; confirm.
 */
static int
storeDigestCalcCap(void)
{
    /*
     * To-Do: Bloom proved that the optimal filter utilization is 50% (half of
     * the bits are off). However, we do not have a formula to calculate the
     * number of _entries_ we want to pre-allocate for.
     */
    const int hi_cap = Config.Swap.maxSize / Config.Store.avgObjectSize;
    const int lo_cap = 1 + store_swap_size / Config.Store.avgObjectSize;
    const int e_count = memInUse(MEM_STOREENTRY);
    int cap = e_count ? e_count : hi_cap;
    debug(71, 2) ("storeDigestCalcCap: have: %d, want %d entries; limits: [%d, %d]\n",
	e_count, cap, lo_cap, hi_cap);
    if (cap < lo_cap)
	cap = lo_cap;
    /* do not enforce hi_cap limit, average-based estimation may be wrong
     *if (cap > hi_cap)
     * cap = hi_cap;
     */
    return cap;
}
478 | ||
479 | /* returns true if we actually resized the digest */ | |
480 | static int | |
d6fd3381 | 481 | storeDigestResize(void) |
304b267e | 482 | { |
483 | const int cap = storeDigestCalcCap(); | |
484 | int diff; | |
485 | assert(store_digest); | |
486 | diff = abs(cap - store_digest->capacity); | |
487 | debug(71, 2) ("storeDigestResize: %d -> %d; change: %d (%d%%)\n", | |
488 | store_digest->capacity, cap, diff, | |
489 | xpercentInt(diff, store_digest->capacity)); | |
490 | /* avoid minor adjustments */ | |
4b4cd312 | 491 | if (diff <= store_digest->capacity / 10) { |
6168bccd | 492 | debug(71, 2) ("storeDigestResize: small change, will not resize.\n"); |
493 | return 0; | |
304b267e | 494 | } else { |
6168bccd | 495 | debug(71, 2) ("storeDigestResize: big change, resizing.\n"); |
304b267e | 496 | cacheDigestChangeCap(store_digest, cap); |
497 | return 1; | |
498 | } | |
499 | } | |
500 | ||
d6fd3381 | 501 | #endif /* USE_CACHE_DIGESTS */ |