/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "base/TextException.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "comm/Connection.h"
#include "comm/Read.h"
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store/Controller.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
 * XXX: convert to MEMPROXY_CLASS() API
 */
#include "mem/Pool.h"

#include <climits>
#include <stack>

#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

/** \todo Convert these string constants to enum string-arrays generated */

const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};

/*
 * This defines a repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
StoreEntry::operator new (size_t bytecount)
{
    assert(bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
    }

    return pool->alloc();
}

void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}

bool
StoreEntry::makePublic(const KeyScope scope)
{
    /* This object can be cached for a long time */
    return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
}

void
StoreEntry::makePrivate(const bool shareable)
{
    releaseRequest(shareable); /* delete object when not used */
}

void
StoreEntry::clearPrivate()
{
    assert(!EBIT_TEST(flags, RELEASE_REQUEST));
    EBIT_CLR(flags, KEY_PRIVATE);
    shareableWhenPrivate = false;
}

bool
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    if (makePublic()) {
        negativeCache();
        return true;
    }
    return false;
}

size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}

#include "comm.h"

void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}

void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead <= 0) {
        assert (mem_obj);
        mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;
    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}

size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

void
StoreEntry::setNoDelay(bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}

// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swappedOut()) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
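
// In short: the first client of a still-pending entry, clients of aborted
// entries, and clients of fully memory-resident objects get
// STORE_MEM_CLIENT; later clients, and clients whose needed bytes are no
// longer in memory, get STORE_DISK_CLIENT.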

StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}

#if USE_ADAPTATION
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif

void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, mem_obj << " in " << *this);

    // Store::Root() is FATALly missing during shutdown
    if (hasTransients() && !shutting_down)
        Store::Root().transientsDisconnect(*this);
    if (hasMemStore() && !shutting_down)
        Store::Root().memoryDisconnect(*this);

    if (MemObject *mem = mem_obj) {
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete mem;
    }
}

void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->hasDisk() && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == NULL);

    delete e;
}

/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */

void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    assert(!key);
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}

/* -------------------------------------------------------------------------- */

void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

void
StoreEntry::touch()
{
    lastref = squid_curtime;
}

void
StoreEntry::releaseRequest(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this);
    if (!shareable)
        shareableWhenPrivate = false; // may already be false
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setPrivateKey(shareable, true);
}

int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    abandon(context);
    return 0;
}

/// keep the unlocked StoreEntry object in the local store_table (if needed) or
/// delete it (otherwise)
void
StoreEntry::doAbandon(const char *context)
{
    debugs(20, 5, *this << " via " << (context ? context : "somebody"));
    assert(!locked());
    assert(storePendingNClients(this) == 0);

    // Both aborted local writers and aborted local readers (of remote writers)
    // are STORE_PENDING, but aborted readers should never release().
    if (EBIT_TEST(flags, RELEASE_REQUEST) ||
            (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
        this->release();
        return;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
}

void
StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequestMethod( request, method);

    if (!result)
        aClient->created (NullStoreEntry::getInstance());
    else
        aClient->created (result);
}

void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().find(storeKeyPublic(uri, method));
}

StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
}

StoreEntry *
storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);

    return e;
}

static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}

/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
void
StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
    debugs(20, 3, shareable << permanent << ' ' << *this);
    if (permanent)
        EBIT_SET(flags, RELEASE_REQUEST); // may already be set
    if (!shareable)
        shareableWhenPrivate = false; // may already be false

    if (EBIT_TEST(flags, KEY_PRIVATE))
        return;

    if (key) {
        Store::Root().evictCached(*this); // all caches/workers will know
        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}

bool
StoreEntry::setPublicKey(const KeyScope scope)
{
    debugs(20, 3, *this);
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return true; // already public

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    try {
        EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
        const cache_key *pubKey = calcPublicKey(scope);
        Store::Root().addWriting(this, pubKey);
        forcePublicKey(pubKey);
        newVaryMarker.unlockAndReset("setPublicKey+success");
        return true;
    } catch (const std::exception &ex) {
        debugs(20, 2, "for " << *this << " failed: " << ex.what());
    }
    return false;
}

void
StoreEntry::clearPublicKeyScope()
{
    if (!key || EBIT_TEST(flags, KEY_PRIVATE))
        return; // probably the old public key was deleted or made private

    // TODO: adjustVary() when collapsed revalidation supports that

    const cache_key *newKey = calcPublicKey(ksDefault);
    if (!storeKeyHashCmp(key, newKey))
        return; // probably another collapsed revalidation beat us to this change

    forcePublicKey(newKey);
}

/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    debugs(20, 3, storeKeyText(newkey) << " for " << *this);
    assert(mem_obj);

    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "releasing clashing " << *e2);
        e2->release(true);
    }

    if (key)
        hashDelete();

    clearPrivate();

    assert(mem_obj->hasUris());
    hashInsert(newkey);

    if (hasDisk())
        storeDirSwapLog(this, SWAP_LOG_ADD);
}

/// Calculates correct public key for feeding forcePublicKey().
/// Assumes adjustVary() has been called for this entry already.
const cache_key *
StoreEntry::calcPublicKey(const KeyScope keyScope)
{
    assert(mem_obj);
    return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
           storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
}

/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
/// \throws std::exception on failures
StoreEntry *
StoreEntry::adjustVary()
{
    assert(mem_obj);

    if (!mem_obj->request)
        return nullptr;

    HttpRequestPointer request(mem_obj->request);

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release(true);
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), mem_obj->getReply().getRaw());
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        // XXX: storeCreateEntry() already tries to make `pe` public under
        // certain conditions. If those conditions do not apply to Vary markers,
        // then refactor to call storeCreatePureEntry() above. Otherwise,
        // refactor to simply check whether `pe` is already public below.
        if (!pe->makePublic()) {
            pe->unlock("StoreEntry::adjustVary+failed_makePublic");
            throw TexcHere("failed to make Vary marker public");
        }
        /* We are allowed to do this typecast */
        HttpReply *rep = new HttpReply;
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        String vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until timestampsSet()

        pe->timestampsSet();

        pe->startWriting(); // after timestampsSet()

        pe->complete();

        return pe;
    }
    return nullptr;
}
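
// Example (illustrative): for a reply carrying "Vary: Accept-Encoding",
// adjustVary() maintains an "x-squid-internal/vary" marker entry under the
// plain URL key, while the response itself is keyed using the request's
// vary_headers (see calcPublicKey() above).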

StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->createMemObject(url, log_url, method);

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}

StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    e->setPrivateKey(false, !flags.cachable);
    return e;
}

/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReplyPointer reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}

/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}

void
StoreEntry::vappendf(const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    *buf = 0;
    int x;

#ifdef VA_COPY
    va_list ap;
    /* Fix of bug 753r. The value of vargs is undefined
     * after vsnprintf() returns. Make a copy of vargs
     * in case we loop around and call vsnprintf() again.
     */
    VA_COPY(ap,vargs);
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
    va_end(ap);
#else /* VA_COPY */
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
#endif /*VA_COPY*/

    if (x < static_cast<int>(sizeof(buf))) {
        append(buf, x);
        return;
    }

    // okay, do it the slow way.
    char *buf2 = new char[x+1];
    int y = vsnprintf(buf2, x+1, fmt, vargs);
    assert(y >= 0 && y == x);
    append(buf2, y);
    delete[] buf2;
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
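
// Usage note: new code should prefer the member form declared in Store.h,
// e.g. entry->appendf("%s\t%d\n", label, count), over the two deprecated
// wrappers above.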

struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;

int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}
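
// Config.max_open_disk_fds is set by the max_open_disk_fds directive in
// squid.conf; as the code above shows, the default of 0 disables the check.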

int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}

bool
StoreEntry::checkTooBig() const
{
    if (mem_obj->endOffset() > store_maxobjsize)
        return true;

    if (getReply()->content_length < 0)
        return false;

    return (getReply()->content_length > store_maxobjsize);
}

// TODO: move "too many open..." checks outside -- we are called too early/late
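// Note: besides answering the cachability question, a negative result below
// (except for the early RELEASE_REQUEST and ENTRY_NEGCACHED returns) has a
// side effect: releaseRequest() marks the entry for release.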
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (hasDisk()) {
            /*
             * the remaining cases are only relevant if we haven't
             * started swapping out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}

void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}

void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}

void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. Do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}

/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT, HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}

/**
 * Clear Memory storage to accommodate the given object len
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    if (!shutting_down) // Store::Root() is FATALly missing during shutdown
        Store::Root().freeMemorySpace(size);
    PROF_stop(storeGetMemSpace);
}

/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}

/* The maximum objects to scan for maintain storage space */
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64

void
StoreEntry::release(const bool shareable)
{
    PROF_start(storeRelease);
    debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        releaseRequest(shareable);
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        releaseRequest(shareable);
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    Store::Root().evictCached(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}

static void
storeLateRelease(void *)
{
    StoreEntry *e;
    static int n = 0;

    if (Store::Controller::store_dirs_rebuilding) {
        eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
        return;
    }

    // TODO: this works but looks inelegant.
    for (int i = 0; i < 10; ++i) {
        if (LateReleaseStack.empty()) {
            debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
            return;
        } else {
            e = LateReleaseStack.top();
            LateReleaseStack.pop();
        }

        e->unlock("storeLateRelease");
        ++n;
    }

    eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
}

/* return 1 if a store entry is locked */
int
StoreEntry::locked() const
{
    if (lock_count)
        return 1;

    /*
     * SPECIAL, PUBLIC entries should be "locked";
     * XXX: Their owner should lock them then instead of relying on this hack.
     */
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            return 1;

    return 0;
}

bool
StoreEntry::validLength() const
{
    int64_t diff;
    const HttpReply *reply;
    assert(mem_obj != NULL);
    reply = getReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength: object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );

    return 0;
}
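
// Worked example: with hdr_sz == 300, content_length == 1000, and
// objectLen() == 1200, diff == 100, so the entry is reported as
// "100 bytes too small" and validLength() returns 0.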

static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}

void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}

void
storeConfigure(void)
{
    Store::Root().updateLimits();
}

bool
StoreEntry::memoryCachable()
{
    if (!checkCachable())
        return 0;

    if (mem_obj == NULL)
        return 0;

    if (mem_obj->data_hdr.size() == 0)
        return 0;

    if (mem_obj->inmem_lo != 0)
        return 0;

    if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
        return 0;

    return 1;
}

int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}

/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
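
// Example: with HTTP violations enabled and "negative_ttl 60 seconds" in
// squid.conf, an error reply lacking expiry information may be served from
// cache for up to a minute before checkNegativeHit() starts failing.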

void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}

int
expiresMoreThan(time_t expires, time_t when)
{
    if (expires < 0) /* No Expires given */
        return 1;

    return (expires > (squid_curtime + when));
}

int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (hasDisk()) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}

bool
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime))
            served_date -= responseTime.tv_sec;
    }

    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    return true;
}
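
// Worked example: if the origin's Date header is ahead of our clock,
// served_date is first clamped to squid_curtime; with "Age: 130" in the
// reply, age > squid_curtime - served_date then holds, so served_date
// becomes squid_curtime - 130, preserving the entry's effective age.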

void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}

void
StoreEntry::unregisterAbort()
{
    assert(mem_obj);
    if (mem_obj->abort.callback) {
        mem_obj->abort.callback = NULL;
        cbdataReferenceDone(mem_obj->abort.data);
    }
}

void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}

/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}

const char *
StoreEntry::url() const
{
    if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}

void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}

void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}

void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}

/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}

/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and invokes the handlers
 * to begin sending anything that may be buffered.
 */
void
StoreEntry::flush()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        EBIT_CLR(flags, DELAY_SENDING);
        invokeHandlers();
    }
}

int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}

int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}

HttpReply const *
StoreEntry::getReply() const
{
    return (mem_obj ? mem_obj->getReply().getRaw() : nullptr);
}

void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, url());
    mem_obj->reset();
    expires = lastModified_ = timestamp = -1;
}

/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once it's working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}

/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
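
// Example (illustrative, assuming the stock LRU replacement module):
// storeReplSetup() ends up calling storeReplAdd("lru", createRemovalPolicy_lru),
// growing storerepl_list by one NULL-terminated slot.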

/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return NULL; /* NOTREACHED */
}
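
// This lookup resolves the squid.conf directives named in the error
// messages above, e.g.:
//   cache_replacement_policy heap GDSF
//   memory_replacement_policy lru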

#if 0
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif

void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest(false); // if it is safe to negatively cache, sharing is OK
    unlock("StoreEntry::storeErrorResponse");
}

/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceReply(HttpReplyPointer(rep));

    if (andStartWriting)
        startWriting();
}

void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    buffer();
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
    flush();
}

char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}

/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !Store::Root().transientsReader(*this) && // this worker is responsible
            hasTransients() && // other workers may be interested
            !hasMemStore() && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().stopSharing(*this);
    }
}

void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}

void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}

void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}

bool
StoreEntry::modifiedSince(const time_t ims, const int imslen) const
{
    int object_length;
    const time_t mod_time = lastModified();

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}

bool
StoreEntry::hasEtag(ETag &etag) const
{
    if (const HttpReply *reply = getReply()) {
        etag = reply->header.getETag(Http::HdrType::ETAG);
        if (etag.str)
            return true;
    }
    return false;
}

bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}

bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}

/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
    if (!repETag.str) {
        static SBuf asterisk("*", 1);
        return strListIsMember(&reqETags, asterisk, ',');
    }

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}

Store::Disk &
StoreEntry::disk() const
{
    assert(hasDisk());
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}

bool
StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
{
    checkDisk();
    if (dirn < 0 && filen < 0)
        return swap_dirn >= 0;
    Must(dirn >= 0);
    const bool matchingDisk = (swap_dirn == dirn);
    return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
}

void
StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
{
    debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
           swapStatusStr[status] << " " << dirn << " " <<
           std::hex << std::setw(8) << std::setfill('0') <<
           std::uppercase << fno);
    checkDisk();
    swap_dirn = dirn;
    swap_filen = fno;
    swap_status = status;
    checkDisk();
}

void
StoreEntry::detachFromDisk()
{
    swap_dirn = -1;
    swap_filen = -1;
    swap_status = SWAPOUT_NONE;
}

void
StoreEntry::checkDisk() const
{
    const bool ok = (swap_dirn < 0) == (swap_filen < 0) &&
                    (swap_dirn < 0) == (swap_status == SWAPOUT_NONE) &&
                    (swap_dirn < 0 || swap_dirn < Config.cacheSwap.n_configured);

    if (!ok) {
        debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " << *this);
        throw std::runtime_error("inconsistent disk entry state ");
    }
}

/*
 * return true if the entry is in a state where
 * it can accept more data (ie with write() method)
 */
bool
StoreEntry::isAccepting() const
{
    if (STORE_PENDING != store_status)
        return false;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return false;

    return true;
}

const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}

static std::ostream &
operator <<(std::ostream &os, const Store::IoStatus &io)
{
    switch (io) {
    case Store::ioUndecided:
        os << 'u';
        break;
    case Store::ioReading:
        os << 'r';
        break;
    case Store::ioWriting:
        os << 'w';
        break;
    case Store::ioDone:
        os << 'o';
        break;
    }
    return os;
}

std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.hasTransients()) {
        const auto &xitTable = e.mem_obj->xitTable;
        os << 't' << xitTable.io << xitTable.index;
    }

    if (e.hasMemStore()) {
        const auto &memCache = e.mem_obj->memCache;
        os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
    }

    // Do not use e.hasDisk() here because its checkDisk() call may call us.
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
            os << 'I';
            if (e.shareableWhenPrivate)
                os << 'H';
        }
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    return os << '/' << &e << '*' << e.locks();
}

/* NullStoreEntry */

NullStoreEntry NullStoreEntry::_instance;

NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}

char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}

void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}

char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}

void
Store::EntryGuard::onException() noexcept
{
    SWALLOW_EXCEPTIONS({
        entry_->releaseRequest(false);
        entry_->unlock(context_);
    });
}