2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 20 Storage Manager */
12 #include "base/TextException.h"
13 #include "CacheDigest.h"
14 #include "CacheManager.h"
15 #include "comm/Connection.h"
16 #include "comm/Read.h"
22 #include "HttpReply.h"
23 #include "HttpRequest.h"
25 #include "MemObject.h"
26 #include "mgr/Registration.h"
27 #include "mgr/StoreIoAction.h"
28 #include "profiler/Profiler.h"
29 #include "repl_modules.h"
30 #include "RequestFlags.h"
31 #include "SquidConfig.h"
32 #include "SquidTime.h"
33 #include "StatCounters.h"
36 #include "store/Controller.h"
37 #include "store/Disk.h"
38 #include "store/Disks.h"
39 #include "store_digest.h"
40 #include "store_key_md5.h"
41 #include "store_log.h"
42 #include "store_rebuild.h"
43 #include "StoreClient.h"
44 #include "StoreIOState.h"
45 #include "StoreMeta.h"
47 #include "swap_log_op.h"
50 #include "DelayPools.h"
53 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
54 * XXX: convert to MEMPROXY_CLASS() API
61 #define REBUILD_TIMESTAMP_DELTA_MAX 2
63 #define STORE_IN_MEM_BUCKETS (229)
65 /** \todo Convert these string constants to enum string-arrays generated */
67 const char *memStatusStr
[] = {
72 const char *pingStatusStr
[] = {
78 const char *storeStatusStr
[] = {
83 const char *swapStatusStr
[] = {
90 * This defines an repl type
93 typedef struct _storerepl_entry storerepl_entry_t
;
95 struct _storerepl_entry
{
97 REMOVALPOLICYCREATE
*create
;
100 static storerepl_entry_t
*storerepl_list
= NULL
;
103 * local function prototypes
105 static int getKeyCounter(void);
106 static OBJH storeCheckCachableStats
;
107 static EVH storeLateRelease
;
112 static std::stack
<StoreEntry
*> LateReleaseStack
;
113 MemAllocator
*StoreEntry::pool
= NULL
;
116 Store::Stats(StoreEntry
* output
)
119 Root().stat(*output
);
122 // XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
123 // definitions but doing so exposes bug 4370, and maybe 4354 and 4355
125 StoreEntry::operator new (size_t bytecount
)
127 assert(bytecount
== sizeof (StoreEntry
));
130 pool
= memPoolCreate ("StoreEntry", bytecount
);
133 return pool
->alloc();
137 StoreEntry::operator delete (void *address
)
139 pool
->freeOne(address
);
143 StoreEntry::makePublic(const KeyScope scope
)
145 /* This object can be cached for a long time */
146 return !EBIT_TEST(flags
, RELEASE_REQUEST
) && setPublicKey(scope
);
150 StoreEntry::makePrivate(const bool shareable
)
152 releaseRequest(shareable
); /* delete object when not used */
156 StoreEntry::clearPrivate()
158 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
159 EBIT_CLR(flags
, KEY_PRIVATE
);
160 shareableWhenPrivate
= false;
164 StoreEntry::cacheNegatively()
166 /* This object may be negatively cached */
175 StoreEntry::inUseCount()
179 return pool
->getInUseCount();
183 StoreEntry::getMD5Text() const
185 return storeKeyText((const cache_key
*)key
);
191 StoreEntry::DeferReader(void *theContext
, CommRead
const &aRead
)
193 StoreEntry
*anEntry
= (StoreEntry
*)theContext
;
194 anEntry
->delayAwareRead(aRead
.conn
,
201 StoreEntry::delayAwareRead(const Comm::ConnectionPointer
&conn
, char *buf
, int len
, AsyncCall::Pointer callback
)
203 size_t amountToRead
= bytesWanted(Range
<size_t>(0, len
));
204 /* sketch: readdeferer* = getdeferer.
205 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
208 if (amountToRead
<= 0) {
210 mem_obj
->delayRead(DeferredRead(DeferReader
, this, CommRead(conn
, buf
, len
, callback
)));
214 if (fd_table
[conn
->fd
].closing()) {
215 // Readers must have closing callbacks if they want to be notified. No
216 // readers appeared to care around 2009/12/14 as they skipped reading
217 // for other reasons. Closing may already be true at the delyaAwareRead
218 // call time or may happen while we wait after delayRead() above.
219 debugs(20, 3, HERE
<< "wont read from closing " << conn
<< " for " <<
221 return; // the read callback will never be called
224 comm_read(conn
, buf
, amountToRead
, callback
);
228 StoreEntry::bytesWanted (Range
<size_t> const aRange
, bool ignoreDelayPools
) const
233 #if URL_CHECKSUM_DEBUG
235 mem_obj
->checkUrlChecksum();
239 if (!mem_obj
->readAheadPolicyCanRead())
242 return mem_obj
->mostBytesWanted(aRange
.end
, ignoreDelayPools
);
246 StoreEntry::checkDeferRead(int) const
248 return (bytesWanted(Range
<size_t>(0,INT_MAX
)) == 0);
252 StoreEntry::setNoDelay(bool const newValue
)
255 mem_obj
->setNoDelay(newValue
);
258 // XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
259 // open swapin file, aggressively trim memory, and ignore read-ahead gap.
260 // It does not mean we will read from disk exclusively (or at all!).
261 // XXX: May create STORE_DISK_CLIENT with no disk caching configured.
262 // XXX: Collapsed clients cannot predict their type.
264 StoreEntry::storeClientType() const
266 /* The needed offset isn't in memory
267 * XXX TODO: this is wrong for range requests
268 * as the needed offset may *not* be 0, AND
269 * offset 0 in the memory object is the HTTP headers.
274 if (mem_obj
->inmem_lo
)
275 return STORE_DISK_CLIENT
;
277 if (EBIT_TEST(flags
, ENTRY_ABORTED
)) {
278 /* I don't think we should be adding clients to aborted entries */
279 debugs(20, DBG_IMPORTANT
, "storeClientType: adding to ENTRY_ABORTED entry");
280 return STORE_MEM_CLIENT
;
283 if (store_status
== STORE_OK
) {
284 /* the object has completed. */
286 if (mem_obj
->inmem_lo
== 0 && !isEmpty()) {
288 debugs(20,7, HERE
<< mem_obj
<< " lo: " << mem_obj
->inmem_lo
<< " hi: " << mem_obj
->endOffset() << " size: " << mem_obj
->object_sz
);
289 if (mem_obj
->endOffset() == mem_obj
->object_sz
) {
290 /* hot object fully swapped in (XXX: or swapped out?) */
291 return STORE_MEM_CLIENT
;
294 /* Memory-only, or currently being swapped out */
295 return STORE_MEM_CLIENT
;
298 return STORE_DISK_CLIENT
;
301 /* here and past, entry is STORE_PENDING */
303 * If this is the first client, let it be the mem client
305 if (mem_obj
->nclients
== 1)
306 return STORE_MEM_CLIENT
;
309 * If there is no disk file to open yet, we must make this a
310 * mem client. If we can't open the swapin file before writing
311 * to the client, there is no guarantee that we will be able
312 * to open it later when we really need it.
314 if (swap_status
== SWAPOUT_NONE
)
315 return STORE_MEM_CLIENT
;
318 * otherwise, make subsequent clients read from disk so they
319 * can not delay the first, and vice-versa.
321 return STORE_DISK_CLIENT
;
324 StoreEntry::StoreEntry() :
335 mem_status(NOT_IN_MEMORY
),
336 ping_status(PING_NONE
),
337 store_status(STORE_PENDING
),
338 swap_status(SWAPOUT_NONE
),
340 shareableWhenPrivate(false)
342 debugs(20, 5, "StoreEntry constructed, this=" << this);
345 StoreEntry::~StoreEntry()
347 debugs(20, 5, "StoreEntry destructed, this=" << this);
352 StoreEntry::deferProducer(const AsyncCall::Pointer
&producer
)
354 if (!deferredProducer
)
355 deferredProducer
= producer
;
357 debugs(20, 5, HERE
<< "Deferred producer call is allready set to: " <<
358 *deferredProducer
<< ", requested call: " << *producer
);
362 StoreEntry::kickProducer()
364 if (deferredProducer
!= NULL
) {
365 ScheduleCallHere(deferredProducer
);
366 deferredProducer
= NULL
;
372 StoreEntry::destroyMemObject()
374 debugs(20, 3, mem_obj
<< " in " << *this);
376 // Store::Root() is FATALly missing during shutdown
377 if (hasTransients() && !shutting_down
)
378 Store::Root().transientsDisconnect(*this);
379 if (hasMemStore() && !shutting_down
)
380 Store::Root().memoryDisconnect(*this);
382 if (MemObject
*mem
= mem_obj
) {
383 setMemStatus(NOT_IN_MEMORY
);
390 destroyStoreEntry(void *data
)
392 debugs(20, 3, HERE
<< "destroyStoreEntry: destroying " << data
);
393 StoreEntry
*e
= static_cast<StoreEntry
*>(static_cast<hash_link
*>(data
));
396 if (e
== NullStoreEntry::getInstance())
399 // Store::Root() is FATALly missing during shutdown
400 if (e
->hasDisk() && !shutting_down
)
401 e
->disk().disconnect(*e
);
403 e
->destroyMemObject();
407 assert(e
->key
== NULL
);
412 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
415 StoreEntry::hashInsert(const cache_key
* someKey
)
417 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey
) << "'");
419 key
= storeKeyDup(someKey
);
420 hash_join(store_table
, this);
424 StoreEntry::hashDelete()
426 if (key
) { // some test cases do not create keys and do not hashInsert()
427 hash_remove_link(store_table
, this);
428 storeKeyFree((const cache_key
*)key
);
433 /* -------------------------------------------------------------------------- */
436 StoreEntry::lock(const char *context
)
439 debugs(20, 3, context
<< " locked key " << getMD5Text() << ' ' << *this);
445 lastref
= squid_curtime
;
449 StoreEntry::releaseRequest(const bool shareable
)
451 debugs(20, 3, shareable
<< ' ' << *this);
453 shareableWhenPrivate
= false; // may already be false
454 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
457 setPrivateKey(shareable
, true);
461 StoreEntry::unlock(const char *context
)
463 debugs(20, 3, (context
? context
: "somebody") <<
464 " unlocking key " << getMD5Text() << ' ' << *this);
465 assert(lock_count
> 0);
469 return (int) lock_count
;
475 /// keep the unlocked StoreEntry object in the local store_table (if needed) or
476 /// delete it (otherwise)
478 StoreEntry::doAbandon(const char *context
)
480 debugs(20, 5, *this << " via " << (context
? context
: "somebody"));
482 assert(storePendingNClients(this) == 0);
484 // Both aborted local writers and aborted local readers (of remote writers)
485 // are STORE_PENDING, but aborted readers should never release().
486 if (EBIT_TEST(flags
, RELEASE_REQUEST
) ||
487 (store_status
== STORE_PENDING
&& !Store::Root().transientsReader(*this))) {
492 if (EBIT_TEST(flags
, KEY_PRIVATE
))
493 debugs(20, DBG_IMPORTANT
, "WARNING: " << __FILE__
<< ":" << __LINE__
<< ": found KEY_PRIVATE");
495 Store::Root().handleIdleEntry(*this); // may delete us
499 StoreEntry::getPublicByRequestMethod (StoreClient
*aClient
, HttpRequest
* request
, const HttpRequestMethod
& method
)
502 StoreEntry
*result
= storeGetPublicByRequestMethod( request
, method
);
505 aClient
->created (NullStoreEntry::getInstance());
507 aClient
->created (result
);
511 StoreEntry::getPublicByRequest (StoreClient
*aClient
, HttpRequest
* request
)
514 StoreEntry
*result
= storeGetPublicByRequest (request
);
517 result
= NullStoreEntry::getInstance();
519 aClient
->created (result
);
523 StoreEntry::getPublic (StoreClient
*aClient
, const char *uri
, const HttpRequestMethod
& method
)
526 StoreEntry
*result
= storeGetPublic (uri
, method
);
529 result
= NullStoreEntry::getInstance();
531 aClient
->created (result
);
535 storeGetPublic(const char *uri
, const HttpRequestMethod
& method
)
537 return Store::Root().find(storeKeyPublic(uri
, method
));
541 storeGetPublicByRequestMethod(HttpRequest
* req
, const HttpRequestMethod
& method
, const KeyScope keyScope
)
543 return Store::Root().find(storeKeyPublicByRequestMethod(req
, method
, keyScope
));
547 storeGetPublicByRequest(HttpRequest
* req
, const KeyScope keyScope
)
549 StoreEntry
*e
= storeGetPublicByRequestMethod(req
, req
->method
, keyScope
);
551 if (e
== NULL
&& req
->method
== Http::METHOD_HEAD
)
552 /* We can generate a HEAD reply from a cached GET object */
553 e
= storeGetPublicByRequestMethod(req
, Http::METHOD_GET
, keyScope
);
/// monotonically increasing counter used to make private keys unique;
/// wraps back to 1 on signed overflow
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}
569 /* RBC 20050104 AFAICT this should become simpler:
570 * rather than reinserting with a special key it should be marked
571 * as 'released' and then cleaned up when refcounting indicates.
572 * the StoreHashIndex could well implement its 'released' in the
574 * Also, clean log writing should skip over ia,t
575 * Otherwise, we need a 'remove from the index but not the store
579 StoreEntry::setPrivateKey(const bool shareable
, const bool permanent
)
581 debugs(20, 3, shareable
<< permanent
<< ' ' << *this);
583 EBIT_SET(flags
, RELEASE_REQUEST
); // may already be set
585 shareableWhenPrivate
= false; // may already be false
587 if (EBIT_TEST(flags
, KEY_PRIVATE
))
591 Store::Root().evictCached(*this); // all caches/workers will know
595 if (mem_obj
&& mem_obj
->hasUris())
596 mem_obj
->id
= getKeyCounter();
597 const cache_key
*newkey
= storeKeyPrivate();
599 assert(hash_lookup(store_table
, newkey
) == NULL
);
600 EBIT_SET(flags
, KEY_PRIVATE
);
601 shareableWhenPrivate
= shareable
;
606 StoreEntry::setPublicKey(const KeyScope scope
)
608 debugs(20, 3, *this);
609 if (key
&& !EBIT_TEST(flags
, KEY_PRIVATE
))
610 return true; // already public
615 * We can't make RELEASE_REQUEST objects public. Depending on
616 * when RELEASE_REQUEST gets set, we might not be swapping out
617 * the object. If we're not swapping out, then subsequent
618 * store clients won't be able to access object data which has
619 * been freed from memory.
621 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
623 #if MORE_DEBUG_OUTPUT
625 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
626 debugs(20, DBG_IMPORTANT
, "assertion failed: RELEASE key " << key
<< ", url " << mem_obj
->url
);
630 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
633 EntryGuard
newVaryMarker(adjustVary(), "setPublicKey+failure");
634 const cache_key
*pubKey
= calcPublicKey(scope
);
635 Store::Root().addWriting(this, pubKey
);
636 forcePublicKey(pubKey
);
637 newVaryMarker
.unlockAndReset("setPublicKey+success");
639 } catch (const std::exception
&ex
) {
640 debugs(20, 2, "for " << *this << " failed: " << ex
.what());
646 StoreEntry::clearPublicKeyScope()
648 if (!key
|| EBIT_TEST(flags
, KEY_PRIVATE
))
649 return; // probably the old public key was deleted or made private
651 // TODO: adjustVary() when collapsed revalidation supports that
653 const cache_key
*newKey
= calcPublicKey(ksDefault
);
654 if (!storeKeyHashCmp(key
, newKey
))
655 return; // probably another collapsed revalidation beat us to this change
657 forcePublicKey(newKey
);
660 /// Unconditionally sets public key for this store entry.
661 /// Releases the old entry with the same public key (if any).
663 StoreEntry::forcePublicKey(const cache_key
*newkey
)
665 debugs(20, 3, storeKeyText(newkey
) << " for " << *this);
668 if (StoreEntry
*e2
= (StoreEntry
*)hash_lookup(store_table
, newkey
)) {
670 debugs(20, 3, "releasing clashing " << *e2
);
679 assert(mem_obj
->hasUris());
683 storeDirSwapLog(this, SWAP_LOG_ADD
);
686 /// Calculates correct public key for feeding forcePublicKey().
687 /// Assumes adjustVary() has been called for this entry already.
689 StoreEntry::calcPublicKey(const KeyScope keyScope
)
692 return mem_obj
->request
? storeKeyPublicByRequest(mem_obj
->request
.getRaw(), keyScope
) :
693 storeKeyPublic(mem_obj
->storeId(), mem_obj
->method
, keyScope
);
696 /// Updates mem_obj->request->vary_headers to reflect the current Vary.
697 /// The vary_headers field is used to calculate the Vary marker key.
698 /// Releases the old Vary marker with an outdated key (if any).
699 /// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
700 /// \throws std::exception on failures
702 StoreEntry::adjustVary()
706 if (!mem_obj
->request
)
709 HttpRequestPointer
request(mem_obj
->request
);
711 if (mem_obj
->vary_headers
.isEmpty()) {
712 /* First handle the case where the object no longer varies */
713 request
->vary_headers
.clear();
715 if (!request
->vary_headers
.isEmpty() && request
->vary_headers
.cmp(mem_obj
->vary_headers
) != 0) {
716 /* Oops.. the variance has changed. Kill the base object
717 * to record the new variance key
719 request
->vary_headers
.clear(); /* free old "bad" variance key */
720 if (StoreEntry
*pe
= storeGetPublic(mem_obj
->storeId(), mem_obj
->method
))
724 /* Make sure the request knows the variance status */
725 if (request
->vary_headers
.isEmpty())
726 request
->vary_headers
= httpMakeVaryMark(request
.getRaw(), mem_obj
->getReply().getRaw());
729 // TODO: storeGetPublic() calls below may create unlocked entries.
730 // We should add/use storeHas() API or lock/unlock those entries.
731 if (!mem_obj
->vary_headers
.isEmpty() && !storeGetPublic(mem_obj
->storeId(), mem_obj
->method
)) {
732 /* Create "vary" base object */
733 StoreEntry
*pe
= storeCreateEntry(mem_obj
->storeId(), mem_obj
->logUri(), request
->flags
, request
->method
);
734 // XXX: storeCreateEntry() already tries to make `pe` public under
735 // certain conditions. If those conditions do not apply to Vary markers,
736 // then refactor to call storeCreatePureEntry() above. Otherwise,
737 // refactor to simply check whether `pe` is already public below.
738 if (!pe
->makePublic()) {
739 pe
->unlock("StoreEntry::adjustVary+failed_makePublic");
740 throw TexcHere("failed to make Vary marker public");
742 /* We are allowed to do this typecast */
743 HttpReply
*rep
= new HttpReply
;
744 rep
->setHeaders(Http::scOkay
, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime
+ 100000);
745 String vary
= mem_obj
->getReply()->header
.getList(Http::HdrType::VARY
);
748 /* Again, we own this structure layout */
749 rep
->header
.putStr(Http::HdrType::VARY
, vary
.termedBuf());
753 #if X_ACCELERATOR_VARY
754 vary
= mem_obj
->getReply()->header
.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY
);
756 if (vary
.size() > 0) {
757 /* Again, we own this structure layout */
758 rep
->header
.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY
, vary
.termedBuf());
763 pe
->replaceHttpReply(rep
, false); // no write until timestampsSet()
767 pe
->startWriting(); // after timestampsSet()
777 storeCreatePureEntry(const char *url
, const char *log_url
, const HttpRequestMethod
& method
)
779 StoreEntry
*e
= NULL
;
780 debugs(20, 3, "storeCreateEntry: '" << url
<< "'");
782 e
= new StoreEntry();
783 e
->createMemObject(url
, log_url
, method
);
785 e
->store_status
= STORE_PENDING
;
787 e
->lastref
= squid_curtime
;
788 e
->timestamp
= -1; /* set in StoreEntry::timestampsSet() */
789 e
->ping_status
= PING_NONE
;
790 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
795 storeCreateEntry(const char *url
, const char *logUrl
, const RequestFlags
&flags
, const HttpRequestMethod
& method
)
797 StoreEntry
*e
= storeCreatePureEntry(url
, logUrl
, method
);
798 e
->lock("storeCreateEntry");
800 if (!neighbors_do_private_keys
&& flags
.hierarchical
&& flags
.cachable
&& e
->setPublicKey())
803 e
->setPrivateKey(false, !flags
.cachable
);
807 /* Mark object as expired */
809 StoreEntry::expireNow()
811 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
812 expires
= squid_curtime
;
816 StoreEntry::write (StoreIOBuffer writeBuffer
)
818 assert(mem_obj
!= NULL
);
819 /* This assert will change when we teach the store to update */
820 PROF_start(StoreEntry_write
);
821 assert(store_status
== STORE_PENDING
);
823 // XXX: caller uses content offset, but we also store headers
824 if (const HttpReplyPointer reply
= mem_obj
->getReply())
825 writeBuffer
.offset
+= reply
->hdr_sz
;
827 debugs(20, 5, "storeWrite: writing " << writeBuffer
.length
<< " bytes for '" << getMD5Text() << "'");
828 PROF_stop(StoreEntry_write
);
829 storeGetMemSpace(writeBuffer
.length
);
830 mem_obj
->write(writeBuffer
);
832 if (!EBIT_TEST(flags
, DELAY_SENDING
))
836 /* Append incoming data from a primary server to an entry. */
838 StoreEntry::append(char const *buf
, int len
)
840 assert(mem_obj
!= NULL
);
842 assert(store_status
== STORE_PENDING
);
844 StoreIOBuffer tempBuffer
;
845 tempBuffer
.data
= (char *)buf
;
846 tempBuffer
.length
= len
;
848 * XXX sigh, offset might be < 0 here, but it gets "corrected"
849 * later. This offset crap is such a mess.
851 tempBuffer
.offset
= mem_obj
->endOffset() - (getReply() ? getReply()->hdr_sz
: 0);
856 StoreEntry::vappendf(const char *fmt
, va_list vargs
)
858 LOCAL_ARRAY(char, buf
, 4096);
864 /* Fix of bug 753r. The value of vargs is undefined
865 * after vsnprintf() returns. Make a copy of vargs
866 * incase we loop around and call vsnprintf() again.
870 if ((x
= vsnprintf(buf
, sizeof(buf
), fmt
, ap
)) < 0) {
871 fatal(xstrerr(errno
));
877 if ((x
= vsnprintf(buf
, sizeof(buf
), fmt
, vargs
)) < 0) {
878 fatal(xstrerr(errno
));
883 if (x
< static_cast<int>(sizeof(buf
))) {
888 // okay, do it the slow way.
889 char *buf2
= new char[x
+1];
890 int y
= vsnprintf(buf2
, x
+1, fmt
, vargs
);
891 assert(y
>= 0 && y
== x
);
896 // deprecated. use StoreEntry::appendf() instead.
898 storeAppendPrintf(StoreEntry
* e
, const char *fmt
,...)
902 e
->vappendf(fmt
, args
);
906 // deprecated. use StoreEntry::appendf() instead.
908 storeAppendVPrintf(StoreEntry
* e
, const char *fmt
, va_list vargs
)
910 e
->vappendf(fmt
, vargs
);
/// counters reported by storeCheckCachableStats(); one "no.*" bucket per
/// rejection reason in StoreEntry::checkCachable()
/// NOTE(review): several fields were lost in extraction; the list below is
/// reconstructed from storeCheckCachableStats() — confirm against upstream.
static struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int missing_parts;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
934 storeTooManyDiskFilesOpen(void)
936 if (Config
.max_open_disk_fds
== 0)
939 if (store_open_disk_fd
> Config
.max_open_disk_fds
)
946 StoreEntry::checkTooSmall()
948 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
951 if (STORE_OK
== store_status
)
952 if (mem_obj
->object_sz
>= 0 &&
953 mem_obj
->object_sz
< Config
.Store
.minObjectSize
)
955 if (getReply()->content_length
> -1)
956 if (getReply()->content_length
< Config
.Store
.minObjectSize
)
962 StoreEntry::checkTooBig() const
964 if (mem_obj
->endOffset() > store_maxobjsize
)
967 if (getReply()->content_length
< 0)
970 return (getReply()->content_length
> store_maxobjsize
);
973 // TODO: move "too many open..." checks outside -- we are called too early/late
975 StoreEntry::checkCachable()
977 // XXX: This method is used for both memory and disk caches, but some
978 // checks are specific to disk caches. Move them to mayStartSwapOut().
980 // XXX: This method may be called several times, sometimes with different
981 // outcomes, making store_check_cachable_hist counters misleading.
983 // check this first to optimize handling of repeated calls for uncachables
984 if (EBIT_TEST(flags
, RELEASE_REQUEST
)) {
985 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
986 ++store_check_cachable_hist
.no
.not_entry_cachable
; // TODO: rename?
987 return 0; // avoid rerequesting release below
990 #if CACHE_ALL_METHODS
992 if (mem_obj
->method
!= Http::METHOD_GET
) {
993 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
994 ++store_check_cachable_hist
.no
.non_get
;
997 if (store_status
== STORE_OK
&& EBIT_TEST(flags
, ENTRY_BAD_LENGTH
)) {
998 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
999 ++store_check_cachable_hist
.no
.wrong_content_length
;
1000 } else if (EBIT_TEST(flags
, ENTRY_NEGCACHED
)) {
1001 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
1002 ++store_check_cachable_hist
.no
.negative_cached
;
1003 return 0; /* avoid release call below */
1004 } else if (!mem_obj
|| !getReply()) {
1005 // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
1006 // this segfault protection, but how can we get such a HIT?
1007 debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
1008 ++store_check_cachable_hist
.no
.missing_parts
;
1009 } else if (checkTooBig()) {
1010 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1011 ++store_check_cachable_hist
.no
.too_big
;
1012 } else if (checkTooSmall()) {
1013 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1014 ++store_check_cachable_hist
.no
.too_small
;
1015 } else if (EBIT_TEST(flags
, KEY_PRIVATE
)) {
1016 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1017 ++store_check_cachable_hist
.no
.private_key
;
1018 } else if (hasDisk()) {
1020 * the remaining cases are only relevant if we haven't
1021 * started swapping out the object yet.
1024 } else if (storeTooManyDiskFilesOpen()) {
1025 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1026 ++store_check_cachable_hist
.no
.too_many_open_files
;
1027 } else if (fdNFree() < RESERVED_FD
) {
1028 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1029 ++store_check_cachable_hist
.no
.too_many_open_fds
;
1031 ++store_check_cachable_hist
.yes
.Default
;
1040 storeCheckCachableStats(StoreEntry
*sentry
)
1042 storeAppendPrintf(sentry
, "Category\t Count\n");
1044 #if CACHE_ALL_METHODS
1046 storeAppendPrintf(sentry
, "no.non_get\t%d\n",
1047 store_check_cachable_hist
.no
.non_get
);
1050 storeAppendPrintf(sentry
, "no.not_entry_cachable\t%d\n",
1051 store_check_cachable_hist
.no
.not_entry_cachable
);
1052 storeAppendPrintf(sentry
, "no.wrong_content_length\t%d\n",
1053 store_check_cachable_hist
.no
.wrong_content_length
);
1054 storeAppendPrintf(sentry
, "no.negative_cached\t%d\n",
1055 store_check_cachable_hist
.no
.negative_cached
);
1056 storeAppendPrintf(sentry
, "no.missing_parts\t%d\n",
1057 store_check_cachable_hist
.no
.missing_parts
);
1058 storeAppendPrintf(sentry
, "no.too_big\t%d\n",
1059 store_check_cachable_hist
.no
.too_big
);
1060 storeAppendPrintf(sentry
, "no.too_small\t%d\n",
1061 store_check_cachable_hist
.no
.too_small
);
1062 storeAppendPrintf(sentry
, "no.private_key\t%d\n",
1063 store_check_cachable_hist
.no
.private_key
);
1064 storeAppendPrintf(sentry
, "no.too_many_open_files\t%d\n",
1065 store_check_cachable_hist
.no
.too_many_open_files
);
1066 storeAppendPrintf(sentry
, "no.too_many_open_fds\t%d\n",
1067 store_check_cachable_hist
.no
.too_many_open_fds
);
1068 storeAppendPrintf(sentry
, "yes.default\t%d\n",
1069 store_check_cachable_hist
.yes
.Default
);
1073 StoreEntry::lengthWentBad(const char *reason
)
1075 debugs(20, 3, "because " << reason
<< ": " << *this);
1076 EBIT_SET(flags
, ENTRY_BAD_LENGTH
);
1081 StoreEntry::complete()
1083 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1085 if (store_status
!= STORE_PENDING
) {
1087 * if we're not STORE_PENDING, then probably we got aborted
1088 * and there should be NO clients on this entry
1090 assert(EBIT_TEST(flags
, ENTRY_ABORTED
));
1091 assert(mem_obj
->nclients
== 0);
1095 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1096 * in use of object_sz?
1098 mem_obj
->object_sz
= mem_obj
->endOffset();
1100 store_status
= STORE_OK
;
1102 assert(mem_status
== NOT_IN_MEMORY
);
1104 if (!EBIT_TEST(flags
, ENTRY_BAD_LENGTH
) && !validLength())
1105 lengthWentBad("!validLength() in complete()");
1107 #if USE_CACHE_DIGESTS
1108 if (mem_obj
->request
)
1109 mem_obj
->request
->hier
.store_complete_stop
= current_time
;
1113 * We used to call invokeHandlers, then storeSwapOut. However,
1114 * Madhukar Reddy <myreddy@persistence.com> reported that
1115 * responses without content length would sometimes get released
1116 * in client_side, thinking that the response is incomplete.
1122 * Someone wants to abort this transfer. Set the reason in the
1123 * request structure, call the callback and mark the
1124 * entry for releasing
1129 ++statCounter
.aborted_requests
;
1130 assert(store_status
== STORE_PENDING
);
1131 assert(mem_obj
!= NULL
);
1132 debugs(20, 6, "storeAbort: " << getMD5Text());
1134 lock("StoreEntry::abort"); /* lock while aborting */
1139 EBIT_SET(flags
, ENTRY_ABORTED
);
1141 setMemStatus(NOT_IN_MEMORY
);
1143 store_status
= STORE_OK
;
1145 /* Notify the server side */
1149 * Should we check abort.data for validity?
1151 if (mem_obj
->abort
.callback
) {
1152 if (!cbdataReferenceValid(mem_obj
->abort
.data
))
1153 debugs(20, DBG_IMPORTANT
,HERE
<< "queueing event when abort.data is not valid");
1154 eventAdd("mem_obj->abort.callback",
1155 mem_obj
->abort
.callback
,
1156 mem_obj
->abort
.data
,
1162 /* XXX Should we reverse these two, so that there is no
1163 * unneeded disk swapping triggered?
1165 /* Notify the client side */
1168 // abort swap out, invalidating what was created so far (release follows)
1169 swapOutFileClose(StoreIOState::writerGone
);
1171 unlock("StoreEntry::abort"); /* unlock */
1175 * Clear Memory storage to accommodate the given object len
1178 storeGetMemSpace(int size
)
1180 PROF_start(storeGetMemSpace
);
1181 if (!shutting_down
) // Store::Root() is FATALly missing during shutdown
1182 Store::Root().freeMemorySpace(size
);
1183 PROF_stop(storeGetMemSpace
);
1186 /* thunk through to Store::Root().maintain(). Note that this would be better still
1187 * if registered against the root store itself, but that requires more complex
1188 * update logic - bigger fish to fry first. Long term each store when
1189 * it becomes active will self register
1192 Store::Maintain(void *)
1194 Store::Root().maintain();
1196 /* Reregister a maintain event .. */
1197 eventAdd("MaintainSwapSpace", Maintain
, NULL
, 1.0, 1);
1201 /* The maximum objects to scan for maintain storage space */
1202 #define MAINTAIN_MAX_SCAN 1024
1203 #define MAINTAIN_MAX_REMOVE 64
1206 StoreEntry::release(const bool shareable
)
1208 PROF_start(storeRelease
);
1209 debugs(20, 3, shareable
<< ' ' << *this << ' ' << getMD5Text());
1210 /* If, for any reason we can't discard this object because of an
1211 * outstanding request, mark it for pending release */
1214 releaseRequest(shareable
);
1215 PROF_stop(storeRelease
);
1219 if (Store::Controller::store_dirs_rebuilding
&& hasDisk()) {
1220 /* TODO: Teach disk stores to handle releases during rebuild instead. */
1222 // lock the entry until rebuilding is done
1223 lock("storeLateRelease");
1224 releaseRequest(shareable
);
1225 LateReleaseStack
.push(this);
1229 storeLog(STORE_LOG_RELEASE
, this);
1230 Store::Root().evictCached(*this);
1231 destroyStoreEntry(static_cast<hash_link
*>(this));
1232 PROF_stop(storeRelease
);
1236 storeLateRelease(void *)
1241 if (Store::Controller::store_dirs_rebuilding
) {
1242 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1246 // TODO: this works but looks unelegant.
1247 for (int i
= 0; i
< 10; ++i
) {
1248 if (LateReleaseStack
.empty()) {
1249 debugs(20, DBG_IMPORTANT
, "storeLateRelease: released " << n
<< " objects");
1252 e
= LateReleaseStack
.top();
1253 LateReleaseStack
.pop();
1256 e
->unlock("storeLateRelease");
1260 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 0.0, 1);
1263 /* return 1 if a store entry is locked */
1265 StoreEntry::locked() const
1271 * SPECIAL, PUBLIC entries should be "locked";
1272 * XXX: Their owner should lock them then instead of relying on this hack.
1274 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1275 if (!EBIT_TEST(flags
, KEY_PRIVATE
))
1282 StoreEntry::validLength() const
1285 const HttpReply
*reply
;
1286 assert(mem_obj
!= NULL
);
1288 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1289 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1291 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply
->hdr_sz
);
1292 debugs(20, 5, "storeEntryValidLength: content_length = " << reply
->content_length
);
1294 if (reply
->content_length
< 0) {
1295 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1299 if (reply
->hdr_sz
== 0) {
1300 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1304 if (mem_obj
->method
== Http::METHOD_HEAD
) {
1305 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1309 if (reply
->sline
.status() == Http::scNotModified
)
1312 if (reply
->sline
.status() == Http::scNoContent
)
1315 diff
= reply
->hdr_sz
+ reply
->content_length
- objectLen();
1320 debugs(20, 3, "storeEntryValidLength: " << (diff
< 0 ? -diff
: diff
) << " bytes too " << (diff
< 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1326 storeRegisterWithCacheManager(void)
1328 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats
, 0, 1);
1329 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create
, 0, 1);
1330 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1331 storeCheckCachableStats
, 0, 1);
1338 mem_policy
= createRemovalPolicy(Config
.memPolicy
);
1341 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1342 Store::Root().init();
1343 storeRebuildStart();
1345 storeRegisterWithCacheManager();
1349 storeConfigure(void)
1351 Store::Root().updateLimits();
1355 StoreEntry::memoryCachable()
1357 if (!checkCachable())
1360 if (mem_obj
== NULL
)
1363 if (mem_obj
->data_hdr
.size() == 0)
1366 if (mem_obj
->inmem_lo
!= 0)
1369 if (!Config
.onoff
.memory_cache_first
&& swappedOut() && refcount
== 1)
1376 StoreEntry::checkNegativeHit() const
1378 if (!EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1381 if (expires
<= squid_curtime
)
1384 if (store_status
!= STORE_OK
)
1391 * Set object for negative caching.
1392 * Preserves any expiry information given by the server.
1393 * In absence of proper expiry info it will set to expire immediately,
1394 * or with HTTP-violations enabled the configured negative-TTL is observed
1397 StoreEntry::negativeCache()
1399 // XXX: should make the default for expires 0 instead of -1
1400 // so we can distinguish "Expires: -1" from nothing.
1402 #if USE_HTTP_VIOLATIONS
1403 expires
= squid_curtime
+ Config
.negativeTtl
;
1405 expires
= squid_curtime
;
1407 EBIT_SET(flags
, ENTRY_NEGCACHED
);
1411 storeFreeMemory(void)
1413 Store::FreeMemory();
1414 #if USE_CACHE_DIGESTS
1415 delete store_digest
;
1417 store_digest
= NULL
;
1421 expiresMoreThan(time_t expires
, time_t when
)
1423 if (expires
< 0) /* No Expires given */
1426 return (expires
> (squid_curtime
+ when
));
1430 StoreEntry::validToSend() const
1432 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
1435 if (EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1436 if (expires
<= squid_curtime
)
1439 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
1442 // now check that the entry has a cache backing or is collapsed
1443 if (hasDisk()) // backed by a disk cache
1446 if (swappingOut()) // will be backed by a disk cache
1449 if (!mem_obj
) // not backed by a memory cache and not collapsed
1452 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
1453 // disk cache backing that store_client constructor will assert. XXX: This
1454 // is wrong for range requests (that could feed off nibbled memory) and for
1455 // entries backed by the shared memory cache (that could, in theory, get
1456 // nibbled bytes from that cache, but there is no such "memoryIn" code).
1457 if (mem_obj
->inmem_lo
) // in memory cache, but got nibbled at
1460 // The following check is correct but useless at this position. TODO: Move
1461 // it up when the shared memory cache can either replenish locally nibbled
1462 // bytes or, better, does not use local RAM copy at all.
1463 // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
1470 StoreEntry::timestampsSet()
1472 const HttpReply
*reply
= getReply();
1473 time_t served_date
= reply
->date
;
1474 int age
= reply
->header
.getInt(Http::HdrType::AGE
);
1475 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1476 /* make sure that 0 <= served_date <= squid_curtime */
1478 if (served_date
< 0 || served_date
> squid_curtime
)
1479 served_date
= squid_curtime
;
1482 * If the returned Date: is more than 24 hours older than
1483 * the squid_curtime, then one of us needs to use NTP to set our
1484 * clock. We'll pretend that our clock is right.
1486 else if (served_date
< (squid_curtime
- 24 * 60 * 60) )
1487 served_date
= squid_curtime
;
1490 * Compensate with Age header if origin server clock is ahead
1491 * of us and there is a cache in between us and the origin
1492 * server. But DONT compensate if the age value is larger than
1493 * squid_curtime because it results in a negative served_date.
1495 if (age
> squid_curtime
- served_date
)
1496 if (squid_curtime
> age
)
1497 served_date
= squid_curtime
- age
;
1499 // compensate for Squid-to-server and server-to-Squid delays
1500 if (mem_obj
&& mem_obj
->request
) {
1501 struct timeval responseTime
;
1502 if (mem_obj
->request
->hier
.peerResponseTime(responseTime
))
1503 served_date
-= responseTime
.tv_sec
;
1507 if (reply
->expires
> 0 && reply
->date
> -1)
1508 exp
= served_date
+ (reply
->expires
- reply
->date
);
1510 exp
= reply
->expires
;
1512 if (timestamp
== served_date
&& expires
== exp
) {
1513 // if the reply lacks LMT, then we now know that our effective
1514 // LMT (i.e., timestamp) will stay the same, otherwise, old and
1515 // new modification times must match
1516 if (reply
->last_modified
< 0 || reply
->last_modified
== lastModified())
1517 return false; // nothing has changed
1522 lastModified_
= reply
->last_modified
;
1524 timestamp
= served_date
;
1530 StoreEntry::registerAbort(STABH
* cb
, void *data
)
1533 assert(mem_obj
->abort
.callback
== NULL
);
1534 mem_obj
->abort
.callback
= cb
;
1535 mem_obj
->abort
.data
= cbdataReference(data
);
1539 StoreEntry::unregisterAbort()
1542 if (mem_obj
->abort
.callback
) {
1543 mem_obj
->abort
.callback
= NULL
;
1544 cbdataReferenceDone(mem_obj
->abort
.data
);
1549 StoreEntry::dump(int l
) const
1551 debugs(20, l
, "StoreEntry->key: " << getMD5Text());
1552 debugs(20, l
, "StoreEntry->next: " << next
);
1553 debugs(20, l
, "StoreEntry->mem_obj: " << mem_obj
);
1554 debugs(20, l
, "StoreEntry->timestamp: " << timestamp
);
1555 debugs(20, l
, "StoreEntry->lastref: " << lastref
);
1556 debugs(20, l
, "StoreEntry->expires: " << expires
);
1557 debugs(20, l
, "StoreEntry->lastModified_: " << lastModified_
);
1558 debugs(20, l
, "StoreEntry->swap_file_sz: " << swap_file_sz
);
1559 debugs(20, l
, "StoreEntry->refcount: " << refcount
);
1560 debugs(20, l
, "StoreEntry->flags: " << storeEntryFlags(this));
1561 debugs(20, l
, "StoreEntry->swap_dirn: " << swap_dirn
);
1562 debugs(20, l
, "StoreEntry->swap_filen: " << swap_filen
);
1563 debugs(20, l
, "StoreEntry->lock_count: " << lock_count
);
1564 debugs(20, l
, "StoreEntry->mem_status: " << mem_status
);
1565 debugs(20, l
, "StoreEntry->ping_status: " << ping_status
);
1566 debugs(20, l
, "StoreEntry->store_status: " << store_status
);
1567 debugs(20, l
, "StoreEntry->swap_status: " << swap_status
);
1571 * NOTE, this function assumes only two mem states
1574 StoreEntry::setMemStatus(mem_status_t new_status
)
1576 if (new_status
== mem_status
)
1579 // are we using a shared memory cache?
1580 if (Config
.memShared
&& IamWorkerProcess()) {
1581 // This method was designed to update replacement policy, not to
1582 // actually purge something from the memory cache (TODO: rename?).
1583 // Shared memory cache does not have a policy that needs updates.
1584 mem_status
= new_status
;
1588 assert(mem_obj
!= NULL
);
1590 if (new_status
== IN_MEMORY
) {
1591 assert(mem_obj
->inmem_lo
== 0);
1593 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1594 debugs(20, 4, "not inserting special " << *this << " into policy");
1596 mem_policy
->Add(mem_policy
, this, &mem_obj
->repl
);
1597 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
1600 ++hot_obj_count
; // TODO: maintain for the shared hot cache as well
1602 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1603 debugs(20, 4, "not removing special " << *this << " from policy");
1605 mem_policy
->Remove(mem_policy
, this, &mem_obj
->repl
);
1606 debugs(20, 4, "removed " << *this);
1612 mem_status
= new_status
;
1616 StoreEntry::url() const
1618 if (mem_obj
== NULL
)
1619 return "[null_mem_obj]";
1621 return mem_obj
->storeId();
1625 StoreEntry::createMemObject()
1628 mem_obj
= new MemObject();
1632 StoreEntry::createMemObject(const char *aUrl
, const char *aLogUrl
, const HttpRequestMethod
&aMethod
)
1635 ensureMemObject(aUrl
, aLogUrl
, aMethod
);
1639 StoreEntry::ensureMemObject(const char *aUrl
, const char *aLogUrl
, const HttpRequestMethod
&aMethod
)
1642 mem_obj
= new MemObject();
1643 mem_obj
->setUris(aUrl
, aLogUrl
, aMethod
);
1646 /** disable sending content to the clients.
1648 * This just sets DELAY_SENDING.
1651 StoreEntry::buffer()
1653 EBIT_SET(flags
, DELAY_SENDING
);
1656 /** flush any buffered content.
1658 * This just clears DELAY_SENDING and Invokes the handlers
1659 * to begin sending anything that may be buffered.
1664 if (EBIT_TEST(flags
, DELAY_SENDING
)) {
1665 EBIT_CLR(flags
, DELAY_SENDING
);
1671 StoreEntry::objectLen() const
1673 assert(mem_obj
!= NULL
);
1674 return mem_obj
->object_sz
;
1678 StoreEntry::contentLen() const
1680 assert(mem_obj
!= NULL
);
1681 assert(getReply() != NULL
);
1682 return objectLen() - getReply()->hdr_sz
;
1686 StoreEntry::getReply() const
1688 return (mem_obj
? mem_obj
->getReply().getRaw() : nullptr);
// Fragment of a StoreEntry reset routine (its signature and surrounding
// lines are missing from this excerpt): logs the entry URL and clears the
// freshness-related timestamps back to their "unknown" (-1) values.
// NOTE(review): original text preserved verbatim below; reconcile with the
// full repository copy before editing.
1695 debugs(20, 3, url());
1697 expires
= lastModified_
= timestamp
= -1;
1703 * This routine calls the SETUP routine for each fs type.
1704 * I don't know where the best place for this is, and I'm not going to shuffle
1705 * around large chunks of code right now (that can be done once it's working.)
1714 * called to add another store removal policy module
1717 storeReplAdd(const char *type
, REMOVALPOLICYCREATE
* create
)
1721 /* find the number of currently known repl types */
1722 for (i
= 0; storerepl_list
&& storerepl_list
[i
].typestr
; ++i
) {
1723 if (strcmp(storerepl_list
[i
].typestr
, type
) == 0) {
1724 debugs(20, DBG_IMPORTANT
, "WARNING: Trying to load store replacement policy " << type
<< " twice.");
1729 /* add the new type */
1730 storerepl_list
= static_cast<storerepl_entry_t
*>(xrealloc(storerepl_list
, (i
+ 2) * sizeof(storerepl_entry_t
)));
1732 memset(&storerepl_list
[i
+ 1], 0, sizeof(storerepl_entry_t
));
1734 storerepl_list
[i
].typestr
= type
;
1736 storerepl_list
[i
].create
= create
;
1740 * Create a removal policy instance
1743 createRemovalPolicy(RemovalPolicySettings
* settings
)
1745 storerepl_entry_t
*r
;
1747 for (r
= storerepl_list
; r
&& r
->typestr
; ++r
) {
1748 if (strcmp(r
->typestr
, settings
->type
) == 0)
1749 return r
->create(settings
->args
);
1752 debugs(20, DBG_IMPORTANT
, "ERROR: Unknown policy " << settings
->type
);
1753 debugs(20, DBG_IMPORTANT
, "ERROR: Be sure to have set cache_replacement_policy");
1754 debugs(20, DBG_IMPORTANT
, "ERROR: and memory_replacement_policy in squid.conf!");
1755 fatalf("ERROR: Unknown policy %s\n", settings
->type
);
1756 return NULL
; /* NOTREACHED */
// Assigns (or clears, when filn < 0) a swap file number on a StoreEntry,
// keeping the swap-directory occupancy bitmap and disk LRU list in sync.
// NOTE(review): several lines (return type, braces, the branch structure,
// and any surrounding preprocessor guards) are missing from this excerpt;
// the original text is preserved verbatim below — reconcile with the full
// repository copy before editing.
1761 storeSwapFileNumberSet(StoreEntry
* e
, sfileno filn
)
// no-op when the entry already holds this swap file number
1763 if (e
->swap_file_number
== filn
)
// clearing path: release the bitmap slot and drop the entry from the LRU
1768 storeDirMapBitReset(e
->swap_file_number
);
1769 storeDirLRUDelete(e
);
1770 e
->swap_file_number
= -1;
// assigning path: entry must not already own a slot
1772 assert(-1 == e
->swap_file_number
);
1773 storeDirMapBitSet(e
->swap_file_number
= filn
);
1781 StoreEntry::storeErrorResponse(HttpReply
*reply
)
1783 lock("StoreEntry::storeErrorResponse");
1785 replaceHttpReply(reply
);
1789 releaseRequest(false); // if it is safe to negatively cache, sharing is OK
1790 unlock("StoreEntry::storeErrorResponse");
1794 * Replace a store entry with
1795 * a new reply. This eats the reply.
1798 StoreEntry::replaceHttpReply(HttpReply
*rep
, bool andStartWriting
)
1800 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1803 debugs(20, DBG_CRITICAL
, "Attempt to replace object with no in-memory representation");
1807 mem_obj
->replaceReply(HttpReplyPointer(rep
));
1809 if (andStartWriting
)
1814 StoreEntry::startWriting()
1816 /* TODO: when we store headers separately remove the header portion */
1817 /* TODO: mark the length of the headers ? */
1818 /* We ONLY want the headers */
1823 const HttpReply
*rep
= getReply();
1827 rep
->packHeadersInto(this);
1828 mem_obj
->markEndOfReplyHeaders();
1829 EBIT_CLR(flags
, ENTRY_FWD_HDR_WAIT
);
1831 rep
->body
.packInto(this);
1836 StoreEntry::getSerialisedMetaData()
1838 StoreMeta
*tlv_list
= storeSwapMetaBuild(this);
1840 char *result
= storeSwapMetaPack(tlv_list
, &swap_hdr_sz
);
1841 storeSwapTLVFree(tlv_list
);
1842 assert (swap_hdr_sz
>= 0);
1843 mem_obj
->swap_hdr_sz
= (size_t) swap_hdr_sz
;
1848 * Abandon the transient entry our worker has created if neither the shared
1849 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1850 * any, should notice and use Plan B instead of getting stuck waiting for us
1851 * to start swapping the entry out.
1854 StoreEntry::transientsAbandonmentCheck()
1856 if (mem_obj
&& !Store::Root().transientsReader(*this) && // this worker is responsible
1857 hasTransients() && // other workers may be interested
1858 !hasMemStore() && // rejected by the shared memory cache
1859 mem_obj
->swapout
.decision
== MemObject::SwapOut::swImpossible
) {
1860 debugs(20, 7, "cannot be shared: " << *this);
1861 if (!shutting_down
) // Store::Root() is FATALly missing during shutdown
1862 Store::Root().stopSharing(*this);
1867 StoreEntry::memOutDecision(const bool)
1869 transientsAbandonmentCheck();
1873 StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision
&decision
)
1875 // Abandon our transient entry if neither shared memory nor disk wants it.
1877 mem_obj
->swapout
.decision
= decision
;
1878 transientsAbandonmentCheck();
1882 StoreEntry::trimMemory(const bool preserveSwappable
)
1886 * Bug #1943. We must not let go any data for IN_MEMORY
1887 * objects. We have to wait until the mem_status changes.
1889 if (mem_status
== IN_MEMORY
)
1892 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1893 return; // cannot trim because we do not load them again
1895 if (preserveSwappable
)
1896 mem_obj
->trimSwappable();
1898 mem_obj
->trimUnSwappable();
1900 debugs(88, 7, *this << " inmem_lo=" << mem_obj
->inmem_lo
);
1904 StoreEntry::modifiedSince(const time_t ims
, const int imslen
) const
1907 const time_t mod_time
= lastModified();
1909 debugs(88, 3, "modifiedSince: '" << url() << "'");
1911 debugs(88, 3, "modifiedSince: mod_time = " << mod_time
);
1916 /* Find size of the object */
1917 object_length
= getReply()->content_length
;
1919 if (object_length
< 0)
1920 object_length
= contentLen();
1922 if (mod_time
> ims
) {
1923 debugs(88, 3, "--> YES: entry newer than client");
1925 } else if (mod_time
< ims
) {
1926 debugs(88, 3, "--> NO: entry older than client");
1928 } else if (imslen
< 0) {
1929 debugs(88, 3, "--> NO: same LMT, no client length");
1931 } else if (imslen
== object_length
) {
1932 debugs(88, 3, "--> NO: same LMT, same length");
1935 debugs(88, 3, "--> YES: same LMT, different length");
1941 StoreEntry::hasEtag(ETag
&etag
) const
1943 if (const HttpReply
*reply
= getReply()) {
1944 etag
= reply
->header
.getETag(Http::HdrType::ETAG
);
1952 StoreEntry::hasIfMatchEtag(const HttpRequest
&request
) const
1954 const String reqETags
= request
.header
.getList(Http::HdrType::IF_MATCH
);
1955 return hasOneOfEtags(reqETags
, false);
1959 StoreEntry::hasIfNoneMatchEtag(const HttpRequest
&request
) const
1961 const String reqETags
= request
.header
.getList(Http::HdrType::IF_NONE_MATCH
);
1962 // weak comparison is allowed only for HEAD or full-body GET requests
1963 const bool allowWeakMatch
= !request
.flags
.isRanged
&&
1964 (request
.method
== Http::METHOD_GET
|| request
.method
== Http::METHOD_HEAD
);
1965 return hasOneOfEtags(reqETags
, allowWeakMatch
);
1968 /// whether at least one of the request ETags matches entity ETag
1970 StoreEntry::hasOneOfEtags(const String
&reqETags
, const bool allowWeakMatch
) const
1972 const ETag repETag
= getReply()->header
.getETag(Http::HdrType::ETAG
);
1974 static SBuf
asterisk("*", 1);
1975 return strListIsMember(&reqETags
, asterisk
, ',');
1978 bool matched
= false;
1979 const char *pos
= NULL
;
1982 while (!matched
&& strListGetItem(&reqETags
, ',', &item
, &ilen
, &pos
)) {
1983 if (!strncmp(item
, "*", ilen
))
1987 str
.append(item
, ilen
);
1989 if (etagParseInit(&reqETag
, str
.termedBuf())) {
1990 matched
= allowWeakMatch
? etagIsWeakEqual(repETag
, reqETag
) :
1991 etagIsStrongEqual(repETag
, reqETag
);
1999 StoreEntry::disk() const
2002 const RefCount
<Store::Disk
> &sd
= INDEXSD(swap_dirn
);
2008 StoreEntry::hasDisk(const sdirno dirn
, const sfileno filen
) const
2011 if (dirn
< 0 && filen
< 0)
2012 return swap_dirn
>= 0;
2014 const bool matchingDisk
= (swap_dirn
== dirn
);
2015 return filen
< 0 ? matchingDisk
: (matchingDisk
&& swap_filen
== filen
);
2019 StoreEntry::attachToDisk(const sdirno dirn
, const sfileno fno
, const swap_status_t status
)
2021 debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
2022 swapStatusStr
[status
] << " " << dirn
<< " " <<
2023 std::hex
<< std::setw(8) << std::setfill('0') <<
2024 std::uppercase
<< fno
);
2028 swap_status
= status
;
2033 StoreEntry::detachFromDisk()
2037 swap_status
= SWAPOUT_NONE
;
2041 StoreEntry::checkDisk() const
2043 const bool ok
= (swap_dirn
< 0) == (swap_filen
< 0) &&
2044 (swap_dirn
< 0) == (swap_status
== SWAPOUT_NONE
) &&
2045 (swap_dirn
< 0 || swap_dirn
< Config
.cacheSwap
.n_configured
);
2048 debugs(88, DBG_IMPORTANT
, "ERROR: inconsistent disk entry state " << *this);
2049 throw std::runtime_error("inconsistent disk entry state ");
2054 * return true if the entry is in a state where
2055 * it can accept more data (ie with write() method)
2058 StoreEntry::isAccepting() const
2060 if (STORE_PENDING
!= store_status
)
2063 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
2070 StoreEntry::describeTimestamps() const
2072 LOCAL_ARRAY(char, buf
, 256);
2073 snprintf(buf
, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
2074 static_cast<int>(timestamp
),
2075 static_cast<int>(lastref
),
2076 static_cast<int>(lastModified_
),
2077 static_cast<int>(expires
));
// Stream a one-character code for the entry's Store I/O state, used by the
// StoreEntry operator<< debugging output below.
// NOTE(review): the per-case output statements, any remaining enumerators,
// and the closing return are missing from this excerpt; the original text
// is preserved verbatim below — reconcile with the full repository copy.
2081 static std::ostream
&
2082 operator <<(std::ostream
&os
, const Store::IoStatus
&io
)
2085 case Store::ioUndecided
:
2088 case Store::ioReading
:
2091 case Store::ioWriting
:
// Debugging output for a StoreEntry: prints transients/mem-cache/disk
// locations, non-default status values, and set flag bits, each as unique
// letters, followed by the entry address and lock count.
// NOTE(review): braces, a few output statements, and the exact handling of
// the KEY_PRIVATE/shareableWhenPrivate region are missing from this
// excerpt; the original text is preserved verbatim below — reconcile with
// the full repository copy before editing.
2101 std::ostream
&operator <<(std::ostream
&os
, const StoreEntry
&e
)
// transient-table location (SMP collapsed forwarding)
2105 if (e
.hasTransients()) {
2106 const auto &xitTable
= e
.mem_obj
->xitTable
;
2107 os
<< 't' << xitTable
.io
<< xitTable
.index
;
// shared memory cache location
2110 if (e
.hasMemStore()) {
2111 const auto &memCache
= e
.mem_obj
->memCache
;
2112 os
<< 'm' << memCache
.io
<< memCache
.index
<< '@' << memCache
.offset
;
2115 // Do not use e.hasDisk() here because its checkDisk() call may calls us.
2116 if (e
.swap_filen
> -1 || e
.swap_dirn
> -1)
2117 os
<< 'd' << e
.swap_filen
<< '@' << e
.swap_dirn
;
2121 // print only non-default status values, using unique letters
2122 if (e
.mem_status
!= NOT_IN_MEMORY
||
2123 e
.store_status
!= STORE_PENDING
||
2124 e
.swap_status
!= SWAPOUT_NONE
||
2125 e
.ping_status
!= PING_NONE
) {
2126 if (e
.mem_status
!= NOT_IN_MEMORY
) os
<< 'm';
2127 if (e
.store_status
!= STORE_PENDING
) os
<< 's';
2128 if (e
.swap_status
!= SWAPOUT_NONE
) os
<< 'w' << e
.swap_status
;
2129 if (e
.ping_status
!= PING_NONE
) os
<< 'p' << e
.ping_status
;
2132 // print only set flags, using unique letters
2134 if (EBIT_TEST(e
.flags
, ENTRY_SPECIAL
)) os
<< 'S';
2135 if (EBIT_TEST(e
.flags
, ENTRY_REVALIDATE_ALWAYS
)) os
<< 'R';
2136 if (EBIT_TEST(e
.flags
, DELAY_SENDING
)) os
<< 'P';
2137 if (EBIT_TEST(e
.flags
, RELEASE_REQUEST
)) os
<< 'X';
2138 if (EBIT_TEST(e
.flags
, REFRESH_REQUEST
)) os
<< 'F';
2139 if (EBIT_TEST(e
.flags
, ENTRY_REVALIDATE_STALE
)) os
<< 'E';
// KEY_PRIVATE handling — intervening lines missing from this excerpt
2140 if (EBIT_TEST(e
.flags
, KEY_PRIVATE
)) {
2142 if (e
.shareableWhenPrivate
)
2145 if (EBIT_TEST(e
.flags
, KEY_PRIVATE
)) os
<< 'I';
2146 if (EBIT_TEST(e
.flags
, ENTRY_FWD_HDR_WAIT
)) os
<< 'W';
2147 if (EBIT_TEST(e
.flags
, ENTRY_NEGCACHED
)) os
<< 'N';
2148 if (EBIT_TEST(e
.flags
, ENTRY_VALIDATED
)) os
<< 'V';
2149 if (EBIT_TEST(e
.flags
, ENTRY_BAD_LENGTH
)) os
<< 'L';
2150 if (EBIT_TEST(e
.flags
, ENTRY_ABORTED
)) os
<< 'A';
// trailer: entry address and current lock count
2153 return os
<< '/' << &e
<< '*' << e
.locks();
2156 /* NullStoreEntry */
// Null Object implementation of StoreEntry: a process-wide singleton that
// must never be deleted.
// NOTE(review): the method bodies, return types, and braces are missing
// from this excerpt; the original text is preserved verbatim below —
// reconcile with the full repository copy before editing.
// the singleton instance
2158 NullStoreEntry
NullStoreEntry::_instance
;
// accessor for the singleton
2161 NullStoreEntry::getInstance()
// null entry's MD5 key representation (body not visible here)
2167 NullStoreEntry::getMD5Text() const
// deleting the singleton is a fatal programming error
2173 NullStoreEntry::operator delete(void*)
2175 fatal ("Attempt to delete NullStoreEntry\n");
// null entry has no swap metadata (body not visible here)
2179 NullStoreEntry::getSerialisedMetaData()
2185 Store::EntryGuard::onException() noexcept
2187 SWALLOW_EXCEPTIONS({
2188 entry_
->releaseRequest(false);
2189 entry_
->unlock(context_
);