]>
Commit | Line | Data |
---|---|---|
8638fc66 | 1 | /* |
bf95c10a | 2 | * Copyright (C) 1996-2022 The Squid Software Foundation and contributors |
e25c139f | 3 | * |
bbc27441 AJ |
4 | * Squid software is distributed under GPLv2+ license and includes |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
8638fc66 | 7 | */ |
8 | ||
bbc27441 AJ |
9 | /* DEBUG: section 71 Store Digest Manager */ |
10 | ||
6168bccd | 11 | /* |
12 | * TODO: We probably do not track all the cases when | |
13 | * storeDigestNoteStoreReady() must be called; this may prevent | |
14 | * storeDigestRebuild/write schedule to be activated | |
15 | */ | |
16 | ||
582c2af2 | 17 | #include "squid.h" |
675b8408 | 18 | #include "debug/Stream.h" |
a553a5a3 | 19 | #include "event.h" |
582c2af2 | 20 | #include "globals.h" |
8822ebee | 21 | #include "mgr/Registration.h" |
35a28a37 | 22 | #include "store_digest.h" |
d6fd3381 | 23 | |
b814e8d4 FC |
24 | #if USE_CACHE_DIGESTS |
25 | #include "CacheDigest.h" | |
528b2c61 | 26 | #include "HttpReply.h" |
582c2af2 | 27 | #include "HttpRequest.h" |
308e60be | 28 | #include "internal.h" |
528b2c61 | 29 | #include "MemObject.h" |
aa839030 | 30 | #include "PeerDigest.h" |
c6f15d40 | 31 | #include "refresh.h" |
4d5904f7 | 32 | #include "SquidConfig.h" |
582c2af2 | 33 | #include "Store.h" |
c8f4eac4 | 34 | #include "StoreSearch.h" |
ed6e9fb9 | 35 | #include "util.h" |
528b2c61 | 36 | |
074d6a40 | 37 | #include <cmath> |
582c2af2 | 38 | |
d6fd3381 | 39 | /* |
40 | * local types | |
41 | */ | |
12784378 | 42 | |
/// Tracks the progress of the local cache digest rebuild and rewrite cycles.
/// A rebuild repopulates the digest from the store index; a rewrite swaps the
/// digest out into a store entry so peers can fetch it.
class StoreDigestState
{
public:
    StoreDigestCBlock cblock; ///< wire-format header appended before the digest mask
    int rebuild_lock = 0; ///< bucket number
    StoreEntry * rewrite_lock = nullptr; ///< points to store entry with the digest
    StoreEntry * publicEntry = nullptr; ///< points to the previous store entry with the digest
    StoreSearchPointer theSearch; ///< iterates store entries during a rebuild
    int rewrite_offset = 0; ///< bytes of the digest mask already swapped out
    int rebuild_count = 0; ///< number of completed rebuilds
    int rewrite_count = 0; ///< number of completed rewrites
};
12784378 | 55 | |
/// Counters describing how store entries were folded into (or kept out of)
/// the local cache digest; reset at the start of each rebuild.
class StoreDigestStats
{
public:
    int del_count = 0; ///< #store entries deleted from store_digest
    int del_lost_count = 0; ///< #store entries not found in store_digest on delete
    int add_count = 0; ///< #store entries accepted to store_digest
    int add_coll_count = 0; ///< #accepted entries that collided with existing ones
    int rej_count = 0; ///< #store entries not accepted to store_digest
    int rej_coll_count = 0; ///< #not accepted entries that collided with existing ones
};
6168bccd | 66 | |
/* local vars */
static StoreDigestState sd_state; ///< rebuild/rewrite progress bookkeeping
static StoreDigestStats sd_stats; ///< add/delete/reject counters for the current digest

/* local prototypes */
static void storeDigestRebuildStart(void *datanotused);
static void storeDigestRebuildResume(void);
static void storeDigestRebuildFinish(void);
static void storeDigestRebuildStep(void *datanotused);
static void storeDigestRewriteStart(void *);
static void storeDigestRewriteResume(void);
static void storeDigestRewriteFinish(StoreEntry * e);
static EVH storeDigestSwapOutStep;
static void storeDigestCBlockSwapOut(StoreEntry * e);
static void storeDigestAdd(const StoreEntry *);
12784378 | 82 | |
831e953c AJ |
83 | /// calculates digest capacity |
84 | static uint64_t | |
85 | storeDigestCalcCap() | |
86 | { | |
87 | /* | |
88 | * To-Do: Bloom proved that the optimal filter utilization is 50% (half of | |
89 | * the bits are off). However, we do not have a formula to calculate the | |
90 | * number of _entries_ we want to pre-allocate for. | |
91 | */ | |
92 | const uint64_t hi_cap = Store::Root().maxSize() / Config.Store.avgObjectSize; | |
93 | const uint64_t lo_cap = 1 + Store::Root().currentSize() / Config.Store.avgObjectSize; | |
94 | const uint64_t e_count = StoreEntry::inUseCount(); | |
95 | uint64_t cap = e_count ? e_count : hi_cap; | |
96 | debugs(71, 2, "have: " << e_count << ", want " << cap << | |
97 | " entries; limits: [" << lo_cap << ", " << hi_cap << "]"); | |
98 | ||
99 | if (cap < lo_cap) | |
100 | cap = lo_cap; | |
101 | ||
102 | /* do not enforce hi_cap limit, average-based estimation may be wrong | |
103 | *if (cap > hi_cap) | |
104 | * cap = hi_cap; | |
105 | */ | |
106 | ||
107 | // Bug 4534: we still have to set an upper-limit at some reasonable value though. | |
108 | // this matches cacheDigestCalcMaskSize doing (cap*bpe)+7 < INT_MAX | |
109 | const uint64_t absolute_max = (INT_MAX -8) / Config.digest.bits_per_entry; | |
110 | if (cap > absolute_max) { | |
6008010b AJ |
111 | static time_t last_loud = 0; |
112 | if (last_loud < squid_curtime - 86400) { | |
113 | debugs(71, DBG_IMPORTANT, "WARNING: Cache Digest cannot store " << cap << " entries. Limiting to " << absolute_max); | |
114 | last_loud = squid_curtime; | |
115 | } else { | |
116 | debugs(71, 3, "WARNING: Cache Digest cannot store " << cap << " entries. Limiting to " << absolute_max); | |
117 | } | |
831e953c AJ |
118 | cap = absolute_max; |
119 | } | |
120 | ||
121 | return cap; | |
122 | } | |
123 | #endif /* USE_CACHE_DIGESTS */ | |
12784378 | 124 | |
8638fc66 | 125 | void |
d6fd3381 | 126 | storeDigestInit(void) |
8638fc66 | 127 | { |
8f0386af | 128 | Mgr::RegisterAction("store_digest", "Store Digest", storeDigestReport, 0, 1); |
d120ed12 | 129 | |
6cfa8966 | 130 | #if USE_CACHE_DIGESTS |
7e3ce7b9 | 131 | if (!Config.onoff.digest_generation) { |
62e76326 | 132 | store_digest = NULL; |
bf8fe701 | 133 | debugs(71, 3, "Local cache digest generation disabled"); |
62e76326 | 134 | return; |
7e3ce7b9 | 135 | } |
62e76326 | 136 | |
831e953c | 137 | const uint64_t cap = storeDigestCalcCap(); |
e04fc9d3 | 138 | store_digest = new CacheDigest(cap, Config.digest.bits_per_entry); |
e0236918 | 139 | debugs(71, DBG_IMPORTANT, "Local cache digest enabled; rebuild/rewrite every " << |
bf8fe701 | 140 | (int) Config.digest.rebuild_period << "/" << |
141 | (int) Config.digest.rewrite_period << " sec"); | |
142 | ||
b56b37cf | 143 | sd_state = StoreDigestState(); |
d6fd3381 | 144 | #else |
145 | store_digest = NULL; | |
bf8fe701 | 146 | debugs(71, 3, "Local cache digest is 'off'"); |
d6fd3381 | 147 | #endif |
8638fc66 | 148 | } |
149 | ||
/* called when store_rebuild completes */
/// Starts the first digest rebuild and rewrite cycles once the store index
/// is usable; a no-op when digest generation is disabled or compiled out.
void
storeDigestNoteStoreReady(void)
{
#if USE_CACHE_DIGESTS

    if (Config.onoff.digest_generation) {
        storeDigestRebuildStart(nullptr);
        storeDigestRewriteStart(nullptr);
    }

#endif
}
163 | ||
35a28a37 | 164 | //TODO: this seems to be dead code. Is it needed? |
d6fd3381 | 165 | void |
166 | storeDigestDel(const StoreEntry * entry) | |
167 | { | |
168 | #if USE_CACHE_DIGESTS | |
62e76326 | 169 | |
7e3ce7b9 | 170 | if (!Config.onoff.digest_generation) { |
62e76326 | 171 | return; |
7e3ce7b9 | 172 | } |
62e76326 | 173 | |
d6fd3381 | 174 | assert(entry && store_digest); |
bf8fe701 | 175 | debugs(71, 6, "storeDigestDel: checking entry, key: " << entry->getMD5Text()); |
62e76326 | 176 | |
d46a87a8 | 177 | if (!EBIT_TEST(entry->flags, KEY_PRIVATE)) { |
6fc4e508 | 178 | if (!store_digest->contains(static_cast<const cache_key *>(entry->key))) { |
5db6bf73 | 179 | ++sd_stats.del_lost_count; |
bf8fe701 | 180 | debugs(71, 6, "storeDigestDel: lost entry, key: " << entry->getMD5Text() << " url: " << entry->url() ); |
62e76326 | 181 | } else { |
5db6bf73 | 182 | ++sd_stats.del_count; |
fbba122c | 183 | store_digest->remove(static_cast<const cache_key *>(entry->key)); |
bf8fe701 | 184 | debugs(71, 6, "storeDigestDel: deled entry, key: " << entry->getMD5Text()); |
62e76326 | 185 | } |
d6fd3381 | 186 | } |
8b082ed9 FC |
187 | #else |
188 | (void)entry; | |
35a28a37 | 189 | #endif //USE_CACHE_DIGESTS |
12784378 | 190 | } |
191 | ||
12784378 | 192 | void |
d6fd3381 | 193 | storeDigestReport(StoreEntry * e) |
194 | { | |
195 | #if USE_CACHE_DIGESTS | |
62e76326 | 196 | |
7e3ce7b9 | 197 | if (!Config.onoff.digest_generation) { |
62e76326 | 198 | return; |
7e3ce7b9 | 199 | } |
62e76326 | 200 | |
d6fd3381 | 201 | if (store_digest) { |
0e3b8c9f AJ |
202 | static const SBuf label("store"); |
203 | cacheDigestReport(store_digest, label, e); | |
62e76326 | 204 | storeAppendPrintf(e, "\t added: %d rejected: %d ( %.2f %%) del-ed: %d\n", |
205 | sd_stats.add_count, | |
206 | sd_stats.rej_count, | |
207 | xpercent(sd_stats.rej_count, sd_stats.rej_count + sd_stats.add_count), | |
208 | sd_stats.del_count); | |
209 | storeAppendPrintf(e, "\t collisions: on add: %.2f %% on rej: %.2f %%\n", | |
210 | xpercent(sd_stats.add_coll_count, sd_stats.add_count), | |
211 | xpercent(sd_stats.rej_coll_count, sd_stats.rej_count)); | |
d6fd3381 | 212 | } else { |
62e76326 | 213 | storeAppendPrintf(e, "store digest: disabled.\n"); |
d6fd3381 | 214 | } |
8b082ed9 FC |
215 | #else |
216 | (void)e; | |
35a28a37 | 217 | #endif //USE_CACHE_DIGESTS |
d6fd3381 | 218 | } |
219 | ||
220 | /* | |
221 | * LOCAL FUNCTIONS | |
222 | */ | |
223 | ||
224 | #if USE_CACHE_DIGESTS | |
225 | ||
/* should we digest this entry? used by storeDigestAdd() */
/// \returns 1 if the entry is cacheable, small enough, and fresh enough to
///     be advertised in the digest; 0 otherwise (each rejection is logged)
static int
storeDigestAddable(const StoreEntry * e)
{
    /* add some stats! XXX */

    debugs(71, 6, "storeDigestAddable: checking entry, key: " << e->getMD5Text());

    /* check various entry flags (mimics StoreEntry::checkCachable XXX) */

    if (EBIT_TEST(e->flags, KEY_PRIVATE)) {
        debugs(71, 6, "storeDigestAddable: NO: private key");
        return 0;
    }

    if (EBIT_TEST(e->flags, ENTRY_NEGCACHED)) {
        debugs(71, 6, "storeDigestAddable: NO: negative cached");
        return 0;
    }

    if (EBIT_TEST(e->flags, RELEASE_REQUEST)) {
        debugs(71, 6, "storeDigestAddable: NO: release requested");
        return 0;
    }

    if (e->store_status == STORE_OK && EBIT_TEST(e->flags, ENTRY_BAD_LENGTH)) {
        debugs(71, 6, "storeDigestAddable: NO: wrong content-length");
        return 0;
    }

    /* do not digest huge objects */
    if (e->swap_file_sz > (uint64_t )Config.Store.maxObjectSize) {
        debugs(71, 6, "storeDigestAddable: NO: too big");
        return 0;
    }

    /* still here? check staleness */
    /* Note: We should use the time of the next rebuild, not (cur_time+period) */
    if (refreshCheckDigest(e, Config.digest.rebuild_period)) {
        debugs(71, 6, "storeDigestAdd: entry expires within " << Config.digest.rebuild_period << " secs, ignoring");
        return 0;
    }

    /*
     * idea: how about also skipping very fresh (thus, potentially
     * unstable) entries? Should be configurable through
     * cd_refresh_pattern, of course.
     */
    /*
     * idea: skip objects that are going to be purged before the next
     * update.
     */
    return 1;
}
280 | ||
d6fd3381 | 281 | static void |
6168bccd | 282 | storeDigestAdd(const StoreEntry * entry) |
283 | { | |
6168bccd | 284 | assert(entry && store_digest); |
c68e9c6b | 285 | |
286 | if (storeDigestAddable(entry)) { | |
5db6bf73 | 287 | ++sd_stats.add_count; |
62e76326 | 288 | |
6fc4e508 | 289 | if (store_digest->contains(static_cast<const cache_key *>(entry->key))) |
5db6bf73 | 290 | ++sd_stats.add_coll_count; |
62e76326 | 291 | |
fbba122c | 292 | store_digest->add(static_cast<const cache_key *>(entry->key)); |
62e76326 | 293 | |
bf8fe701 | 294 | debugs(71, 6, "storeDigestAdd: added entry, key: " << entry->getMD5Text()); |
6168bccd | 295 | } else { |
5db6bf73 | 296 | ++sd_stats.rej_count; |
62e76326 | 297 | |
6fc4e508 | 298 | if (store_digest->contains(static_cast<const cache_key *>(entry->key))) |
5db6bf73 | 299 | ++sd_stats.rej_coll_count; |
6168bccd | 300 | } |
12784378 | 301 | } |
302 | ||
/* rebuilds digest from scratch */
/// Event handler that begins a digest rebuild cycle. Takes rebuild_lock
/// immediately; if a rewrite is in progress, the actual work is deferred
/// until storeDigestRewriteFinish() calls storeDigestRebuildResume().
static void
storeDigestRebuildStart(void *)
{
    assert(store_digest);
    /* prevent overlapping if rebuild schedule is too tight */

    if (sd_state.rebuild_lock) {
        debugs(71, DBG_IMPORTANT, "storeDigestRebuildStart: overlap detected, consider increasing rebuild period");
        return;
    }

    // take the lock even if we defer below, so storeDigestRewriteFinish()
    // knows a rebuild is pending
    sd_state.rebuild_lock = 1;
    debugs(71, 2, "storeDigestRebuildStart: rebuild #" << sd_state.rebuild_count + 1);

    if (sd_state.rewrite_lock) {
        debugs(71, 2, "storeDigestRebuildStart: waiting for Rewrite to finish.");
        return;
    }

    storeDigestRebuildResume();
}
325 | ||
831e953c AJ |
326 | /// \returns true if we actually resized the digest |
327 | static bool | |
328 | storeDigestResize() | |
329 | { | |
330 | const uint64_t cap = storeDigestCalcCap(); | |
331 | assert(store_digest); | |
3faf5197 AJ |
332 | uint64_t diff; |
333 | if (cap > store_digest->capacity) | |
334 | diff = cap - store_digest->capacity; | |
335 | else | |
336 | diff = store_digest->capacity - cap; | |
831e953c AJ |
337 | debugs(71, 2, store_digest->capacity << " -> " << cap << "; change: " << |
338 | diff << " (" << xpercentInt(diff, store_digest->capacity) << "%)" ); | |
339 | /* avoid minor adjustments */ | |
340 | ||
341 | if (diff <= store_digest->capacity / 10) { | |
342 | debugs(71, 2, "small change, will not resize."); | |
343 | return false; | |
344 | } else { | |
345 | debugs(71, 2, "big change, resizing."); | |
346 | store_digest->updateCapacity(cap); | |
347 | } | |
348 | return true; | |
349 | } | |
350 | ||
6168bccd | 351 | /* called be Rewrite to push Rebuild forward */ |
352 | static void | |
d6fd3381 | 353 | storeDigestRebuildResume(void) |
6168bccd | 354 | { |
355 | assert(sd_state.rebuild_lock); | |
356 | assert(!sd_state.rewrite_lock); | |
2745fea5 | 357 | sd_state.theSearch = Store::Root().search(); |
304b267e | 358 | /* resize or clear */ |
62e76326 | 359 | |
304b267e | 360 | if (!storeDigestResize()) |
28faff32 | 361 | store_digest->clear(); /* not clean()! */ |
62e76326 | 362 | |
b56b37cf | 363 | sd_stats = StoreDigestStats(); |
62e76326 | 364 | |
c43f5247 | 365 | eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1); |
12784378 | 366 | } |
367 | ||
368 | /* finishes swap out sequence for the digest; schedules next rebuild */ | |
369 | static void | |
d6fd3381 | 370 | storeDigestRebuildFinish(void) |
12784378 | 371 | { |
372 | assert(sd_state.rebuild_lock); | |
373 | sd_state.rebuild_lock = 0; | |
5db6bf73 | 374 | ++sd_state.rebuild_count; |
bf8fe701 | 375 | debugs(71, 2, "storeDigestRebuildFinish: done."); |
7e3ce7b9 | 376 | eventAdd("storeDigestRebuildStart", storeDigestRebuildStart, NULL, (double) |
62e76326 | 377 | Config.digest.rebuild_period, 1); |
6168bccd | 378 | /* resume pending Rewrite if any */ |
62e76326 | 379 | |
12784378 | 380 | if (sd_state.rewrite_lock) |
62e76326 | 381 | storeDigestRewriteResume(); |
12784378 | 382 | } |
383 | ||
384 | /* recalculate a few hash buckets per invocation; schedules next step */ | |
385 | static void | |
8b082ed9 | 386 | storeDigestRebuildStep(void *) |
12784378 | 387 | { |
c8f4eac4 | 388 | /* TODO: call Store::Root().size() to determine this.. */ |
389 | int count = Config.Store.objectsPerBucket * (int) ceil((double) store_hash_buckets * | |
390 | (double) Config.digest.rebuild_chunk_percentage / 100.0); | |
12784378 | 391 | assert(sd_state.rebuild_lock); |
62e76326 | 392 | |
bf8fe701 | 393 | debugs(71, 3, "storeDigestRebuildStep: buckets: " << store_hash_buckets << " entries to check: " << count); |
62e76326 | 394 | |
c8f4eac4 | 395 | while (count-- && !sd_state.theSearch->isDone() && sd_state.theSearch->next()) |
396 | storeDigestAdd(sd_state.theSearch->currentItem()); | |
62e76326 | 397 | |
12784378 | 398 | /* are we done ? */ |
c8f4eac4 | 399 | if (sd_state.theSearch->isDone()) |
62e76326 | 400 | storeDigestRebuildFinish(); |
12784378 | 401 | else |
62e76326 | 402 | eventAdd("storeDigestRebuildStep", storeDigestRebuildStep, NULL, 0.0, 1); |
12784378 | 403 | } |
404 | ||
/* starts swap out sequence for the digest */
/// Event handler that begins publishing the digest: creates the internal
/// digest store entry and takes rewrite_lock. If a rebuild is running, the
/// swap-out is deferred until storeDigestRebuildFinish() resumes it.
static void
storeDigestRewriteStart(void *)
{
    assert(store_digest);
    /* prevent overlapping if rewrite schedule is too tight */

    if (sd_state.rewrite_lock) {
        debugs(71, DBG_IMPORTANT, "storeDigestRewrite: overlap detected, consider increasing rewrite period");
        return;
    }

    debugs(71, 2, "storeDigestRewrite: start rewrite #" << sd_state.rewrite_count + 1);

    // build the well-known internal URL the digest is published under
    const char *url = internalLocalUri("/squid-internal-periodic/", SBuf(StoreDigestFileName));
    const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
    auto req = HttpRequest::FromUrlXXX(url, mx);

    RequestFlags flags;
    flags.cachable = true;

    StoreEntry *e = storeCreateEntry(url, url, flags, Http::METHOD_GET);
    assert(e);
    // take the lock before possibly deferring, so storeDigestRebuildFinish()
    // knows a rewrite is pending
    sd_state.rewrite_lock = e;
    debugs(71, 3, "storeDigestRewrite: url: " << url << " key: " << e->getMD5Text());
    e->mem_obj->request = req;

    /* wait for rebuild (if any) to finish */
    if (sd_state.rebuild_lock) {
        debugs(71, 2, "storeDigestRewriteStart: waiting for rebuild to finish.");
        return;
    }

    storeDigestRewriteResume();
}
440 | ||
/// Performs the deferred part of a rewrite: publishes the new digest entry,
/// retires the previous one, attaches a fake HTTP reply, swaps out the
/// control block, and schedules the chunked mask swap-out.
static void
storeDigestRewriteResume(void)
{
    StoreEntry *e;

    assert(sd_state.rewrite_lock);
    assert(!sd_state.rebuild_lock);
    e = sd_state.rewrite_lock;
    sd_state.rewrite_offset = 0;
    EBIT_SET(e->flags, ENTRY_SPECIAL);
    /* setting public key will mark the old digest entry for removal once unlocked */
    e->setPublicKey();
    // retire the previous digest entry: release it, then drop our lock on it
    if (const auto oldEntry = sd_state.publicEntry) {
        oldEntry->release(true);
        sd_state.publicEntry = nullptr;
        oldEntry->unlock("storeDigestRewriteResume");
    }
    assert(e->locked());
    sd_state.publicEntry = e;
    /* fake reply */
    HttpReply *rep = new HttpReply;
    // advertised body size is the control block followed by the digest mask
    rep->setHeaders(Http::scOkay, "Cache Digest OK",
                    "application/cache-digest", (store_digest->mask_size + sizeof(sd_state.cblock)),
                    squid_curtime, (squid_curtime + Config.digest.rewrite_period) );
    debugs(71, 3, "storeDigestRewrite: entry expires on " << rep->expires <<
           " (" << std::showpos << (int) (rep->expires - squid_curtime) << ")");
    e->buffer();
    e->replaceHttpReply(rep);
    storeDigestCBlockSwapOut(e);
    e->flush();
    eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, sd_state.rewrite_lock, 0.0, 1, false);
}
473 | ||
474 | /* finishes swap out sequence for the digest; schedules next rewrite */ | |
475 | static void | |
b644367b | 476 | storeDigestRewriteFinish(StoreEntry * e) |
12784378 | 477 | { |
aa839030 | 478 | assert(e == sd_state.rewrite_lock); |
528b2c61 | 479 | e->complete(); |
3900307b | 480 | e->timestampsSet(); |
26ac0430 | 481 | debugs(71, 2, "storeDigestRewriteFinish: digest expires at " << e->expires << |
bf8fe701 | 482 | " (" << std::showpos << (int) (e->expires - squid_curtime) << ")"); |
6168bccd | 483 | /* is this the write order? @?@ */ |
528b2c61 | 484 | e->mem_obj->unlinkRequest(); |
feefc1d9 | 485 | sd_state.rewrite_lock = NULL; |
5db6bf73 | 486 | ++sd_state.rewrite_count; |
7e3ce7b9 | 487 | eventAdd("storeDigestRewriteStart", storeDigestRewriteStart, NULL, (double) |
62e76326 | 488 | Config.digest.rewrite_period, 1); |
6168bccd | 489 | /* resume pending Rebuild if any */ |
62e76326 | 490 | |
6168bccd | 491 | if (sd_state.rebuild_lock) |
62e76326 | 492 | storeDigestRebuildResume(); |
12784378 | 493 | } |
494 | ||
495 | /* swaps out one digest "chunk" per invocation; schedules next swap out */ | |
496 | static void | |
52040193 | 497 | storeDigestSwapOutStep(void *data) |
12784378 | 498 | { |
aa839030 | 499 | StoreEntry *e = static_cast<StoreEntry *>(data); |
7e3ce7b9 | 500 | int chunk_size = Config.digest.swapout_chunk_size; |
aa839030 | 501 | assert(e == sd_state.rewrite_lock); |
12784378 | 502 | assert(e); |
12784378 | 503 | /* _add_ check that nothing bad happened while we were waiting @?@ @?@ */ |
62e76326 | 504 | |
831e953c | 505 | if (static_cast<uint32_t>(sd_state.rewrite_offset + chunk_size) > store_digest->mask_size) |
62e76326 | 506 | chunk_size = store_digest->mask_size - sd_state.rewrite_offset; |
507 | ||
3900307b | 508 | e->append(store_digest->mask + sd_state.rewrite_offset, chunk_size); |
62e76326 | 509 | |
e4049756 | 510 | debugs(71, 3, "storeDigestSwapOutStep: size: " << store_digest->mask_size << |
511 | " offset: " << sd_state.rewrite_offset << " chunk: " << | |
512 | chunk_size << " bytes"); | |
62e76326 | 513 | |
12784378 | 514 | sd_state.rewrite_offset += chunk_size; |
62e76326 | 515 | |
12784378 | 516 | /* are we done ? */ |
831e953c | 517 | if (static_cast<uint32_t>(sd_state.rewrite_offset) >= store_digest->mask_size) |
62e76326 | 518 | storeDigestRewriteFinish(e); |
12784378 | 519 | else |
aa839030 | 520 | eventAdd("storeDigestSwapOutStep", storeDigestSwapOutStep, data, 0.0, 1, false); |
12784378 | 521 | } |
522 | ||
523 | static void | |
b644367b | 524 | storeDigestCBlockSwapOut(StoreEntry * e) |
12784378 | 525 | { |
12784378 | 526 | memset(&sd_state.cblock, 0, sizeof(sd_state.cblock)); |
462f66d2 | 527 | sd_state.cblock.ver.current = htons(CacheDigestVer.current); |
528 | sd_state.cblock.ver.required = htons(CacheDigestVer.required); | |
529 | sd_state.cblock.capacity = htonl(store_digest->capacity); | |
530 | sd_state.cblock.count = htonl(store_digest->count); | |
531 | sd_state.cblock.del_count = htonl(store_digest->del_count); | |
532 | sd_state.cblock.mask_size = htonl(store_digest->mask_size); | |
831e953c | 533 | sd_state.cblock.bits_per_entry = Config.digest.bits_per_entry; |
6168bccd | 534 | sd_state.cblock.hash_func_count = (unsigned char) CacheDigestHashFuncCount; |
3900307b | 535 | e->append((char *) &sd_state.cblock, sizeof(sd_state.cblock)); |
8638fc66 | 536 | } |
537 | ||
d6fd3381 | 538 | #endif /* USE_CACHE_DIGESTS */ |
f53969cc | 539 |