2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 20 Storage Manager */
12 #include "base/AsyncCbdataCalls.h"
13 #include "base/IoManip.h"
14 #include "base/PackableStream.h"
15 #include "base/TextException.h"
16 #include "CacheDigest.h"
17 #include "CacheManager.h"
18 #include "CollapsedForwarding.h"
19 #include "comm/Connection.h"
20 #include "comm/Read.h"
21 #include "debug/Messages.h"
22 #if HAVE_DISKIO_MODULE_IPCIO
23 #include "DiskIO/IpcIo/IpcIoFile.h"
30 #include "HttpReply.h"
31 #include "HttpRequest.h"
33 #include "MemObject.h"
35 #include "mgr/Registration.h"
36 #include "mgr/StoreIoAction.h"
37 #include "repl_modules.h"
38 #include "RequestFlags.h"
39 #include "sbuf/Stream.h"
40 #include "SquidConfig.h"
41 #include "StatCounters.h"
44 #include "store/Controller.h"
45 #include "store/Disk.h"
46 #include "store/Disks.h"
47 #include "store/SwapMetaOut.h"
48 #include "store_digest.h"
49 #include "store_key_md5.h"
50 #include "store_log.h"
51 #include "store_rebuild.h"
52 #include "StoreClient.h"
53 #include "StoreIOState.h"
55 #include "swap_log_op.h"
58 #include "DelayPools.h"
61 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
62 * XXX: convert to MEMPROXY_CLASS() API
64 #include "mem/Allocator.h"
70 #define REBUILD_TIMESTAMP_DELTA_MAX 2
72 #define STORE_IN_MEM_BUCKETS (229)
74 // TODO: Convert these string constants to enum string-arrays generated
76 const char *memStatusStr
[] = {
81 const char *pingStatusStr
[] = {
87 const char *storeStatusStr
[] = {
92 const char *swapStatusStr
[] = {
100 * This defines an repl type
103 typedef struct _storerepl_entry storerepl_entry_t
;
105 struct _storerepl_entry
{
107 REMOVALPOLICYCREATE
*create
;
110 static storerepl_entry_t
*storerepl_list
= nullptr;
113 * local function prototypes
115 static int getKeyCounter(void);
116 static OBJH storeCheckCachableStats
;
117 static EVH storeLateRelease
;
122 static std::stack
<StoreEntry
*> LateReleaseStack
;
123 Mem::Allocator
*StoreEntry::pool
= nullptr;
// Cache manager handler: writes Store statistics into the given output entry,
// delegating to the Store root controller's stat() reporting.
126 Store::Stats(StoreEntry
* output
)
129 Root().stat(*output
);
132 /// reports the current state of Store-related queues
134 StatQueues(StoreEntry
*e
)
// Wrap the entry in a stream so queue reporters can print into it.
137 PackableStream
stream(*e
);
138 CollapsedForwarding::StatQueue(stream
);
// IpcIo queue stats are reported only when that DiskIO module is built in.
139 #if HAVE_DISKIO_MODULE_IPCIO
141 IpcIoFile::StatQueue(stream
);
146 // XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
147 // definitions but doing so exposes bug 4370, and maybe 4354 and 4355
// Allocates a StoreEntry from a dedicated memory pool; only whole
// StoreEntry-sized allocations are accepted (asserted below).
149 StoreEntry::operator new (size_t bytecount
)
151 assert(bytecount
== sizeof (StoreEntry
));
// Pool creation: presumably guarded by a lazy "first call" check on an
// elided line here (the extraction skips original lines 152-153) — confirm
// against upstream before assuming unconditional re-creation.
154 pool
= memPoolCreate ("StoreEntry", bytecount
);
157 return pool
->alloc();
// Returns a StoreEntry allocation to the dedicated pool created by operator new.
161 StoreEntry::operator delete (void *address
)
163 pool
->freeOne(address
);
// Attempts to give this entry a public cache key (within the given scope).
// Refuses if a release has already been requested; returns setPublicKey()'s
// success otherwise.
167 StoreEntry::makePublic(const KeyScope scope
)
169 /* This object can be cached for a long time */
170 return !EBIT_TEST(flags
, RELEASE_REQUEST
) && setPublicKey(scope
);
174 StoreEntry::makePrivate(const bool shareable
)
176 releaseRequest(shareable
); /* delete object when not used */
// Clears the KEY_PRIVATE state. Only legal when no release has been
// requested (asserted); also resets the shareable-while-private flag.
180 StoreEntry::clearPrivate()
182 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
183 EBIT_CLR(flags
, KEY_PRIVATE
);
184 shareableWhenPrivate
= false;
188 StoreEntry::cacheNegatively()
190 /* This object may be negatively cached */
// Reports how many StoreEntry objects are currently allocated from the pool.
199 StoreEntry::inUseCount()
203 return pool
->getInUseCount();
// Renders this entry's cache key as printable text (for logs/debugging).
207 StoreEntry::getMD5Text() const
209 return storeKeyText((const cache_key
*)key
);
// How many response bytes this entry is willing to receive right now,
// bounded by aRange and the read-ahead/delay-pool policy of mem_obj.
// Early-exit values for the nullptr/cannot-read cases are on elided lines —
// confirm against upstream.
213 StoreEntry::bytesWanted (Range
<size_t> const aRange
, bool ignoreDelayPools
) const
215 if (mem_obj
== nullptr)
218 #if URL_CHECKSUM_DEBUG
220 mem_obj
->checkUrlChecksum();
224 if (!mem_obj
->readAheadPolicyCanRead())
227 return mem_obj
->mostBytesWanted(aRange
.end
, ignoreDelayPools
);
// Whether the base reply headers have been fully parsed
// (i.e. reply.pstate reached Http::Message::psParsed).
231 StoreEntry::hasParsedReplyHeader() const
234 const auto &reply
= mem_obj
->baseReply();
235 if (reply
.pstate
== Http::Message::psParsed
) {
236 debugs(20, 7, reply
.hdr_sz
);
// Reading should be deferred when the entry currently wants zero more bytes
// over the full [0, INT_MAX) range (buffer/policy limits reached).
244 StoreEntry::checkDeferRead(int) const
246 return (bytesWanted(Range
<size_t>(0,INT_MAX
)) == 0);
// Forwards the no-delay (delay-pool bypass) setting to the memory object.
250 StoreEntry::setNoDelay(bool const newValue
)
253 mem_obj
->setNoDelay(newValue
);
256 // XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
257 // open swapin file, aggressively trim memory, and ignore read-ahead gap.
258 // It does not mean we will read from disk exclusively (or at all!).
259 // STORE_MEM_CLIENT covers all other cases, including in-memory entries,
260 // newly created entries, and entries not backed by disk or memory cache.
261 // XXX: May create STORE_DISK_CLIENT with no disk caching configured.
262 // XXX: Collapsed clients cannot predict their type.
// Classifies a prospective store client as memory-fed or disk-fed, based on
// how much of the object remains in memory (inmem_lo), abort state,
// completion state, existing client count, and swap-out progress.
264 StoreEntry::storeClientType() const
266 /* The needed offset isn't in memory
267 * XXX TODO: this is wrong for range requests
268 * as the needed offset may *not* be 0, AND
269 * offset 0 in the memory object is the HTTP headers.
274 debugs(20, 7, *this << " inmem_lo=" << mem_obj
->inmem_lo
);
// Non-zero inmem_lo means the start of the body was already trimmed from
// memory, so this client must be fed from disk.
276 if (mem_obj
->inmem_lo
)
277 return STORE_DISK_CLIENT
;
279 if (EBIT_TEST(flags
, ENTRY_ABORTED
)) {
280 /* I don't think we should be adding clients to aborted entries */
281 debugs(20, DBG_IMPORTANT
, "storeClientType: adding to ENTRY_ABORTED entry");
282 return STORE_MEM_CLIENT
;
286 return STORE_MEM_CLIENT
;
288 if (store_status
== STORE_OK
) {
289 /* the object has completed. */
291 if (mem_obj
->inmem_lo
== 0 && !isEmpty()) {
293 debugs(20,7, mem_obj
<< " lo: " << mem_obj
->inmem_lo
<< " hi: " << mem_obj
->endOffset() << " size: " << mem_obj
->object_sz
);
294 if (mem_obj
->endOffset() == mem_obj
->object_sz
) {
295 /* hot object fully swapped in (XXX: or swapped out?) */
296 return STORE_MEM_CLIENT
;
299 /* Memory-only, or currently being swapped out */
300 return STORE_MEM_CLIENT
;
303 debugs(20, 7, "STORE_OK STORE_DISK_CLIENT");
304 return STORE_DISK_CLIENT
;
307 /* here and past, entry is STORE_PENDING */
309 * If this is the first client, let it be the mem client
311 if (mem_obj
->nclients
== 0)
312 return STORE_MEM_CLIENT
;
315 * If there is no disk file to open yet, we must make this a
316 * mem client. If we can't open the swapin file before writing
317 * to the client, there is no guarantee that we will be able
318 * to open it later when we really need it.
320 if (swap_status
== SWAPOUT_NONE
)
321 return STORE_MEM_CLIENT
;
323 // TODO: The above "must make this a mem client" logic contradicts "Slight
324 // weirdness" logic in store_client::doCopy() that converts hits to misses
325 // on startSwapin() failures. We should probably attempt to open a swapin
326 // file _here_ instead (and avoid STORE_DISK_CLIENT designation for clients
327 // that fail to do so). That would also address a similar problem with Rock
328 // store that does not yet support swapin during SWAPOUT_WRITING.
331 * otherwise, make subsequent clients read from disk so they
332 * can not delay the first, and vice-versa.
334 debugs(20, 7, "STORE_PENDING STORE_DISK_CLIENT");
335 return STORE_DISK_CLIENT
;
338 StoreEntry::StoreEntry() :
349 mem_status(NOT_IN_MEMORY
),
350 ping_status(PING_NONE
),
351 store_status(STORE_PENDING
),
352 swap_status(SWAPOUT_NONE
),
354 shareableWhenPrivate(false)
356 debugs(20, 5, "StoreEntry constructed, this=" << this);
359 StoreEntry::~StoreEntry()
361 debugs(20, 5, "StoreEntry destructed, this=" << this);
// Records a producer callback to run later (via kickProducer()). Only the
// first deferred call is kept; subsequent requests are logged and dropped.
366 StoreEntry::deferProducer(const AsyncCall::Pointer
&producer
)
368 if (!deferredProducer
)
369 deferredProducer
= producer
;
371 debugs(20, 5, "Deferred producer call is already set to: " <<
372 *deferredProducer
<< ", requested call: " << *producer
);
// Schedules the deferred producer callback (if any) and clears it so it
// cannot be scheduled twice.
376 StoreEntry::kickProducer()
378 if (deferredProducer
!= nullptr) {
379 ScheduleCallHere(deferredProducer
);
380 deferredProducer
= nullptr;
386 StoreEntry::destroyMemObject()
388 debugs(20, 3, mem_obj
<< " in " << *this);
391 Store::Root().transientsDisconnect(*this);
393 Store::Root().memoryDisconnect(*this);
395 if (auto memObj
= mem_obj
) {
396 setMemStatus(NOT_IN_MEMORY
);
403 destroyStoreEntry(void *data
)
405 debugs(20, 3, "destroyStoreEntry: destroying " << data
);
406 StoreEntry
*e
= static_cast<StoreEntry
*>(static_cast<hash_link
*>(data
));
407 assert(e
!= nullptr);
410 e
->disk().disconnect(*e
);
412 e
->destroyMemObject();
416 assert(e
->key
== nullptr);
421 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
// Duplicates the given key, assigns it to this entry, and inserts the entry
// into the global store_table hash.
424 StoreEntry::hashInsert(const cache_key
* someKey
)
426 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey
) << "'");
428 key
= storeKeyDup(someKey
);
429 hash_join(store_table
, this);
// Removes this entry from store_table and frees its duplicated key.
// A null key is tolerated (see inline note about test cases).
433 StoreEntry::hashDelete()
435 if (key
) { // some test cases do not create keys and do not hashInsert()
436 hash_remove_link(store_table
, this);
437 storeKeyFree((const cache_key
*)key
);
442 /* -------------------------------------------------------------------------- */
// Takes a reference on this entry (the lock_count increment itself is on an
// elided line — confirm against upstream) and refreshes its last-reference
// timestamp.
445 StoreEntry::lock(const char *context
)
448 debugs(20, 3, context
<< " locked key " << getMD5Text() << ' ' << *this);
454 lastref
= squid_curtime
;
// Requests eventual release of this entry: clears shareable-while-private,
// and (unless RELEASE_REQUEST is already set) switches to a private key
// permanently.
458 StoreEntry::releaseRequest(const bool shareable
)
460 debugs(20, 3, shareable
<< ' ' << *this);
462 shareableWhenPrivate
= false; // may already be false
463 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
465 setPrivateKey(shareable
, true);
// Drops a reference on this entry; asserts that a reference exists.
// The decrement and the abandon-on-zero logic are on elided lines — confirm
// against upstream. Returns the remaining lock count.
469 StoreEntry::unlock(const char *context
)
471 debugs(20, 3, (context
? context
: "somebody") <<
472 " unlocking key " << getMD5Text() << ' ' << *this);
473 assert(lock_count
> 0);
477 return (int) lock_count
;
483 /// keep the unlocked StoreEntry object in the local store_table (if needed) or
484 /// delete it (otherwise)
486 StoreEntry::doAbandon(const char *context
)
488 debugs(20, 5, *this << " via " << (context
? context
: "somebody"));
// No clients may still be attached when the entry is abandoned.
490 assert(storePendingNClients(this) == 0);
492 // Both aborted local writers and aborted local readers (of remote writers)
493 // are STORE_PENDING, but aborted readers should never release().
494 if (EBIT_TEST(flags
, RELEASE_REQUEST
) ||
495 (store_status
== STORE_PENDING
&& !Store::Root().transientsReader(*this))) {
500 Store::Root().handleIdleEntry(*this); // may delete us
// Looks up a cached entry by the public key derived from (uri, method).
504 storeGetPublic(const char *uri
, const HttpRequestMethod
& method
)
506 return Store::Root().find(storeKeyPublic(uri
, method
));
// Looks up a cached entry by the public key derived from the request,
// an explicit method, and the key scope.
510 storeGetPublicByRequestMethod(HttpRequest
* req
, const HttpRequestMethod
& method
, const KeyScope keyScope
)
512 return Store::Root().find(storeKeyPublicByRequestMethod(req
, method
, keyScope
));
// Looks up a cached entry for the request's own method; for a HEAD miss,
// falls back to the cached GET object (a HEAD reply can be synthesized
// from it).
516 storeGetPublicByRequest(HttpRequest
* req
, const KeyScope keyScope
)
518 StoreEntry
*e
= storeGetPublicByRequestMethod(req
, req
->method
, keyScope
);
520 if (e
== nullptr && req
->method
== Http::METHOD_HEAD
)
521 /* We can generate a HEAD reply from a cached GET object */
522 e
= storeGetPublicByRequestMethod(req
, Http::METHOD_GET
, keyScope
);
530 static int key_counter
= 0;
532 if (++key_counter
< 0)
538 /* RBC 20050104 AFAICT this should become simpler:
539 * rather than reinserting with a special key it should be marked
540 * as 'released' and then cleaned up when refcounting indicates.
541 * the StoreHashIndex could well implement its 'released' in the
543 * Also, clean log writing should skip over ia,t
544 * Otherwise, we need a 'remove from the index but not the store
548 StoreEntry::setPrivateKey(const bool shareable
, const bool permanent
)
550 debugs(20, 3, shareable
<< permanent
<< ' ' << *this);
552 EBIT_SET(flags
, RELEASE_REQUEST
); // may already be set
554 shareableWhenPrivate
= false; // may already be false
556 if (EBIT_TEST(flags
, KEY_PRIVATE
))
560 Store::Root().evictCached(*this); // all caches/workers will know
564 if (mem_obj
&& mem_obj
->hasUris())
565 mem_obj
->id
= getKeyCounter();
566 const cache_key
*newkey
= storeKeyPrivate();
568 assert(hash_lookup(store_table
, newkey
) == nullptr);
569 EBIT_SET(flags
, KEY_PRIVATE
);
570 shareableWhenPrivate
= shareable
;
575 StoreEntry::setPublicKey(const KeyScope scope
)
577 debugs(20, 3, *this);
578 if (key
&& !EBIT_TEST(flags
, KEY_PRIVATE
))
579 return true; // already public
584 * We can't make RELEASE_REQUEST objects public. Depending on
585 * when RELEASE_REQUEST gets set, we might not be swapping out
586 * the object. If we're not swapping out, then subsequent
587 * store clients won't be able to access object data which has
588 * been freed from memory.
590 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
593 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
596 EntryGuard
newVaryMarker(adjustVary(), "setPublicKey+failure");
597 const cache_key
*pubKey
= calcPublicKey(scope
);
598 Store::Root().addWriting(this, pubKey
);
599 forcePublicKey(pubKey
);
600 newVaryMarker
.unlockAndReset("setPublicKey+success");
602 } catch (const std::exception
&ex
) {
603 debugs(20, 2, "for " << *this << " failed: " << ex
.what());
609 StoreEntry::clearPublicKeyScope()
611 if (!key
|| EBIT_TEST(flags
, KEY_PRIVATE
))
612 return; // probably the old public key was deleted or made private
614 // TODO: adjustVary() when collapsed revalidation supports that
616 const cache_key
*newKey
= calcPublicKey(ksDefault
);
617 if (!storeKeyHashCmp(key
, newKey
))
618 return; // probably another collapsed revalidation beat us to this change
620 forcePublicKey(newKey
);
623 /// Unconditionally sets public key for this store entry.
624 /// Releases the old entry with the same public key (if any).
626 StoreEntry::forcePublicKey(const cache_key
*newkey
)
628 debugs(20, 3, storeKeyText(newkey
) << " for " << *this);
631 if (StoreEntry
*e2
= (StoreEntry
*)hash_lookup(store_table
, newkey
)) {
633 debugs(20, 3, "releasing clashing " << *e2
);
642 assert(mem_obj
->hasUris());
646 storeDirSwapLog(this, SWAP_LOG_ADD
);
649 /// Calculates correct public key for feeding forcePublicKey().
650 /// Assumes adjustVary() has been called for this entry already.
652 StoreEntry::calcPublicKey(const KeyScope keyScope
)
655 return mem_obj
->request
? storeKeyPublicByRequest(mem_obj
->request
.getRaw(), keyScope
) :
656 storeKeyPublic(mem_obj
->storeId(), mem_obj
->method
, keyScope
);
659 /// Updates mem_obj->request->vary_headers to reflect the current Vary.
660 /// The vary_headers field is used to calculate the Vary marker key.
661 /// Releases the old Vary marker with an outdated key (if any).
662 /// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
663 /// \throws std::exception on failures
665 StoreEntry::adjustVary()
669 if (!mem_obj
->request
)
672 HttpRequestPointer
request(mem_obj
->request
);
673 const auto &reply
= mem_obj
->freshestReply();
675 if (mem_obj
->vary_headers
.isEmpty()) {
676 /* First handle the case where the object no longer varies */
677 request
->vary_headers
.clear();
679 if (!request
->vary_headers
.isEmpty() && request
->vary_headers
.cmp(mem_obj
->vary_headers
) != 0) {
680 /* Oops.. the variance has changed. Kill the base object
681 * to record the new variance key
683 request
->vary_headers
.clear(); /* free old "bad" variance key */
684 if (StoreEntry
*pe
= storeGetPublic(mem_obj
->storeId(), mem_obj
->method
))
688 /* Make sure the request knows the variance status */
689 if (request
->vary_headers
.isEmpty())
690 request
->vary_headers
= httpMakeVaryMark(request
.getRaw(), &reply
);
693 // TODO: storeGetPublic() calls below may create unlocked entries.
694 // We should add/use storeHas() API or lock/unlock those entries.
695 if (!mem_obj
->vary_headers
.isEmpty() && !storeGetPublic(mem_obj
->storeId(), mem_obj
->method
)) {
696 /* Create "vary" base object */
697 StoreEntry
*pe
= storeCreateEntry(mem_obj
->storeId(), mem_obj
->logUri(), request
->flags
, request
->method
);
698 // XXX: storeCreateEntry() already tries to make `pe` public under
699 // certain conditions. If those conditions do not apply to Vary markers,
700 // then refactor to call storeCreatePureEntry() above. Otherwise,
701 // refactor to simply check whether `pe` is already public below.
702 if (!pe
->makePublic()) {
703 pe
->unlock("StoreEntry::adjustVary+failed_makePublic");
704 throw TexcHere("failed to make Vary marker public");
706 /* We are allowed to do this typecast */
707 const HttpReplyPointer
rep(new HttpReply
);
708 rep
->setHeaders(Http::scOkay
, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime
+ 100000);
709 auto vary
= reply
.header
.getList(Http::HdrType::VARY
);
712 /* Again, we own this structure layout */
713 rep
->header
.putStr(Http::HdrType::VARY
, vary
.termedBuf());
717 #if X_ACCELERATOR_VARY
718 vary
= reply
.header
.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY
);
720 if (vary
.size() > 0) {
721 /* Again, we own this structure layout */
722 rep
->header
.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY
, vary
.termedBuf());
727 pe
->replaceHttpReply(rep
, false); // no write until timestampsSet()
731 pe
->startWriting(); // after timestampsSet()
733 pe
->completeSuccessfully("wrote the entire Vary marker object");
741 storeCreatePureEntry(const char *url
, const char *log_url
, const HttpRequestMethod
& method
)
743 StoreEntry
*e
= nullptr;
744 debugs(20, 3, "storeCreateEntry: '" << url
<< "'");
746 e
= new StoreEntry();
747 e
->createMemObject(url
, log_url
, method
);
749 e
->store_status
= STORE_PENDING
;
751 e
->lastref
= squid_curtime
;
752 e
->timestamp
= -1; /* set in StoreEntry::timestampsSet() */
753 e
->ping_status
= PING_NONE
;
754 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
759 storeCreateEntry(const char *url
, const char *logUrl
, const RequestFlags
&flags
, const HttpRequestMethod
& method
)
761 StoreEntry
*e
= storeCreatePureEntry(url
, logUrl
, method
);
762 e
->lock("storeCreateEntry");
764 if (!neighbors_do_private_keys
&& flags
.hierarchical
&& flags
.cachable
&& e
->setPublicKey())
767 e
->setPrivateKey(false, !flags
.cachable
);
771 /* Mark object as expired */
// Forces immediate expiry by setting the expiry timestamp to "now".
773 StoreEntry::expireNow()
775 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
776 expires
= squid_curtime
;
780 StoreEntry::write (StoreIOBuffer writeBuffer
)
782 assert(mem_obj
!= nullptr);
783 /* This assert will change when we teach the store to update */
784 assert(store_status
== STORE_PENDING
);
786 // XXX: caller uses content offset, but we also store headers
787 writeBuffer
.offset
+= mem_obj
->baseReply().hdr_sz
;
789 debugs(20, 5, "storeWrite: writing " << writeBuffer
.length
<< " bytes for '" << getMD5Text() << "'");
790 storeGetMemSpace(writeBuffer
.length
);
791 mem_obj
->write(writeBuffer
);
793 if (EBIT_TEST(flags
, ENTRY_FWD_HDR_WAIT
) && !mem_obj
->readAheadPolicyCanRead()) {
794 debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
795 EBIT_CLR(flags
, ENTRY_FWD_HDR_WAIT
);
801 /* Append incoming data from a primary server to an entry. */
803 StoreEntry::append(char const *buf
, int len
)
805 assert(mem_obj
!= nullptr);
807 assert(store_status
== STORE_PENDING
);
809 StoreIOBuffer tempBuffer
;
810 tempBuffer
.data
= (char *)buf
;
811 tempBuffer
.length
= len
;
813 * XXX sigh, offset might be < 0 here, but it gets "corrected"
814 * later. This offset crap is such a mess.
816 tempBuffer
.offset
= mem_obj
->endOffset() - mem_obj
->baseReply().hdr_sz
;
821 StoreEntry::vappendf(const char *fmt
, va_list vargs
)
823 LOCAL_ARRAY(char, buf
, 4096);
828 /* Fix of bug 753r. The value of vargs is undefined
829 * after vsnprintf() returns. Make a copy of vargs
830 * in case we loop around and call vsnprintf() again.
834 if ((x
= vsnprintf(buf
, sizeof(buf
), fmt
, ap
)) < 0) {
835 fatal(xstrerr(errno
));
840 if (x
< static_cast<int>(sizeof(buf
))) {
845 // okay, do it the slow way.
846 char *buf2
= new char[x
+1];
847 int y
= vsnprintf(buf2
, x
+1, fmt
, vargs
);
848 assert(y
>= 0 && y
== x
);
853 // deprecated. use StoreEntry::appendf() instead.
// printf-style append to a store entry; forwards the varargs to vappendf().
855 storeAppendPrintf(StoreEntry
* e
, const char *fmt
,...)
859 e
->vappendf(fmt
, args
);
863 // deprecated. use StoreEntry::appendf() instead.
// va_list variant of storeAppendPrintf(); thin wrapper over vappendf().
865 storeAppendVPrintf(StoreEntry
* e
, const char *fmt
, va_list vargs
)
867 e
->vappendf(fmt
, vargs
);
870 struct _store_check_cachable_hist
{
873 int not_entry_cachable
;
874 int wrong_content_length
;
878 int too_many_open_files
;
879 int too_many_open_fds
;
886 } store_check_cachable_hist
;
// Whether the number of open disk file descriptors exceeds the configured
// max_open_disk_fds limit; a limit of 0 disables the check. The returned
// values for each branch are on elided lines — confirm against upstream.
889 storeTooManyDiskFilesOpen(void)
891 if (Config
.max_open_disk_fds
== 0)
894 if (store_open_disk_fd
> Config
.max_open_disk_fds
)
901 StoreEntry::checkTooSmall()
903 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
906 if (STORE_OK
== store_status
)
907 if (mem_obj
->object_sz
>= 0 &&
908 mem_obj
->object_sz
< Config
.Store
.minObjectSize
)
911 const auto clen
= mem().baseReply().content_length
;
912 if (clen
>= 0 && clen
< Config
.Store
.minObjectSize
)
// Whether this object exceeds the store's maximum object size, judged by
// the bytes accumulated so far and (if known) the reply Content-Length.
918 StoreEntry::checkTooBig() const
920 if (mem_obj
->endOffset() > store_maxobjsize
)
923 const auto clen
= mem_obj
->baseReply().content_length
;
924 return (clen
>= 0 && clen
> store_maxobjsize
);
927 // TODO: move "too many open..." checks outside -- we are called too early/late
929 StoreEntry::checkCachable()
931 // XXX: This method is used for both memory and disk caches, but some
932 // checks are specific to disk caches. Move them to mayStartSwapOut().
934 // XXX: This method may be called several times, sometimes with different
935 // outcomes, making store_check_cachable_hist counters misleading.
937 // check this first to optimize handling of repeated calls for uncachables
938 if (EBIT_TEST(flags
, RELEASE_REQUEST
)) {
939 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
940 ++store_check_cachable_hist
.no
.not_entry_cachable
; // TODO: rename?
941 return 0; // avoid rerequesting release below
944 if (EBIT_TEST(flags
, ENTRY_BAD_LENGTH
)) {
945 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
946 ++store_check_cachable_hist
.no
.wrong_content_length
;
947 } else if (!mem_obj
) {
948 // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
949 // this segfault protection, but how can we get such a HIT?
950 debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
951 ++store_check_cachable_hist
.no
.missing_parts
;
952 } else if (checkTooBig()) {
953 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
954 ++store_check_cachable_hist
.no
.too_big
;
955 } else if (checkTooSmall()) {
956 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
957 ++store_check_cachable_hist
.no
.too_small
;
958 } else if (EBIT_TEST(flags
, KEY_PRIVATE
)) {
959 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
960 ++store_check_cachable_hist
.no
.private_key
;
961 } else if (hasDisk()) {
963 * the remaining cases are only relevant if we haven't
964 * started swapping out the object yet.
967 } else if (storeTooManyDiskFilesOpen()) {
968 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
969 ++store_check_cachable_hist
.no
.too_many_open_files
;
970 } else if (fdNFree() < RESERVED_FD
) {
971 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
972 ++store_check_cachable_hist
.no
.too_many_open_fds
;
974 ++store_check_cachable_hist
.yes
.Default
;
983 storeCheckCachableStats(StoreEntry
*sentry
)
985 storeAppendPrintf(sentry
, "Category\t Count\n");
986 storeAppendPrintf(sentry
, "no.not_entry_cachable\t%d\n",
987 store_check_cachable_hist
.no
.not_entry_cachable
);
988 storeAppendPrintf(sentry
, "no.wrong_content_length\t%d\n",
989 store_check_cachable_hist
.no
.wrong_content_length
);
990 storeAppendPrintf(sentry
, "no.negative_cached\t%d\n",
991 0); // TODO: Remove this backward compatibility hack.
992 storeAppendPrintf(sentry
, "no.missing_parts\t%d\n",
993 store_check_cachable_hist
.no
.missing_parts
);
994 storeAppendPrintf(sentry
, "no.too_big\t%d\n",
995 store_check_cachable_hist
.no
.too_big
);
996 storeAppendPrintf(sentry
, "no.too_small\t%d\n",
997 store_check_cachable_hist
.no
.too_small
);
998 storeAppendPrintf(sentry
, "no.private_key\t%d\n",
999 store_check_cachable_hist
.no
.private_key
);
1000 storeAppendPrintf(sentry
, "no.too_many_open_files\t%d\n",
1001 store_check_cachable_hist
.no
.too_many_open_files
);
1002 storeAppendPrintf(sentry
, "no.too_many_open_fds\t%d\n",
1003 store_check_cachable_hist
.no
.too_many_open_fds
);
1004 storeAppendPrintf(sentry
, "yes.default\t%d\n",
1005 store_check_cachable_hist
.yes
.Default
);
// Marks this entry as having a bad/inconsistent content length, logging the
// caller-supplied reason.
1009 StoreEntry::lengthWentBad(const char *reason
)
1011 debugs(20, 3, "because " << reason
<< ": " << *this);
1012 EBIT_SET(flags
, ENTRY_BAD_LENGTH
);
// Completes the entry when the caller is sure the response arrived whole;
// the reason string is logged. Presumably ends with a call to complete() on
// an elided line — confirm against upstream.
1017 StoreEntry::completeSuccessfully(const char * const whyWeAreSure
)
1019 debugs(20, 3, whyWeAreSure
<< "; " << *this);
// Completes the entry while recording that the body was truncated
// (via lengthWentBad with the caller's reason).
1024 StoreEntry::completeTruncated(const char * const truncationReason
)
1026 lengthWentBad(truncationReason
);
1031 StoreEntry::complete()
1033 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1035 // To preserve forwarding retries, call FwdState::complete() instead.
1036 EBIT_CLR(flags
, ENTRY_FWD_HDR_WAIT
);
1038 if (store_status
!= STORE_PENDING
) {
1040 * if we're not STORE_PENDING, then probably we got aborted
1041 * and there should be NO clients on this entry
1043 assert(EBIT_TEST(flags
, ENTRY_ABORTED
));
1044 assert(mem_obj
->nclients
== 0);
1048 mem_obj
->object_sz
= mem_obj
->endOffset();
1050 store_status
= STORE_OK
;
1052 assert(mem_status
== NOT_IN_MEMORY
);
1054 if (!EBIT_TEST(flags
, ENTRY_BAD_LENGTH
) && !validLength())
1055 lengthWentBad("!validLength() in complete()");
1057 #if USE_CACHE_DIGESTS
1058 if (mem_obj
->request
)
1059 mem_obj
->request
->hier
.store_complete_stop
= current_time
;
1063 * We used to call invokeHandlers, then storeSwapOut. However,
1064 * Madhukar Reddy <myreddy@persistence.com> reported that
1065 * responses without content length would sometimes get released
1066 * in client_side, thinking that the response is incomplete.
1072 * Someone wants to abort this transfer. Set the reason in the
1073 * request structure, call the callback and mark the
1074 * entry for releasing
1079 ++statCounter
.aborted_requests
;
1080 assert(store_status
== STORE_PENDING
);
1081 assert(mem_obj
!= nullptr);
1082 debugs(20, 6, "storeAbort: " << getMD5Text());
1084 lock("StoreEntry::abort"); /* lock while aborting */
1089 EBIT_SET(flags
, ENTRY_ABORTED
);
1091 // allow the Store clients to be told about the problem
1092 EBIT_CLR(flags
, ENTRY_FWD_HDR_WAIT
);
1094 setMemStatus(NOT_IN_MEMORY
);
1096 store_status
= STORE_OK
;
1098 /* Notify the server side */
1100 if (mem_obj
->abortCallback
) {
1101 ScheduleCallHere(mem_obj
->abortCallback
);
1102 mem_obj
->abortCallback
= nullptr;
1105 /* XXX Should we reverse these two, so that there is no
1106 * unneeded disk swapping triggered?
1108 /* Notify the client side */
1111 // abort swap out, invalidating what was created so far (release follows)
1112 swapOutFileClose(StoreIOState::writerGone
);
1114 unlock("StoreEntry::abort"); /* unlock */
1118 * Clear Memory storage to accommodate the given object len
// Asks the Store root to free enough memory-cache space for `size` bytes.
1121 storeGetMemSpace(int size
)
1123 Store::Root().freeMemorySpace(size
);
1126 /* thunk through to Store::Root().maintain(). Note that this would be better still
1127 * if registered against the root store itself, but that requires more complex
1128 * update logic - bigger fish to fry first. Long term each store when
1129 * it becomes active will self register
1132 Store::Maintain(void *)
1134 Store::Root().maintain();
1136 /* Reregister a maintain event .. */
// Self-rescheduling event: runs again in 1 second.
1137 eventAdd("MaintainSwapSpace", Maintain
, nullptr, 1.0, 1);
1141 /* The maximum objects to scan for maintain storage space */
1142 #define MAINTAIN_MAX_SCAN 1024
1143 #define MAINTAIN_MAX_REMOVE 64
1146 StoreEntry::release(const bool shareable
)
1148 debugs(20, 3, shareable
<< ' ' << *this << ' ' << getMD5Text());
1149 /* If, for any reason we can't discard this object because of an
1150 * outstanding request, mark it for pending release */
1153 releaseRequest(shareable
);
1157 if (Store::Controller::store_dirs_rebuilding
&& hasDisk()) {
1158 /* TODO: Teach disk stores to handle releases during rebuild instead. */
1160 // lock the entry until rebuilding is done
1161 lock("storeLateRelease");
1162 releaseRequest(shareable
);
1163 LateReleaseStack
.push(this);
1167 storeLog(STORE_LOG_RELEASE
, this);
1168 Store::Root().evictCached(*this);
1169 destroyStoreEntry(static_cast<hash_link
*>(this));
1173 storeLateRelease(void *)
1178 if (Store::Controller::store_dirs_rebuilding
) {
1179 eventAdd("storeLateRelease", storeLateRelease
, nullptr, 1.0, 1);
1183 // TODO: this works but looks unelegant.
1184 for (int i
= 0; i
< 10; ++i
) {
1185 if (LateReleaseStack
.empty()) {
1186 debugs(20, Important(30), "storeLateRelease: released " << n
<< " objects");
1189 e
= LateReleaseStack
.top();
1190 LateReleaseStack
.pop();
1193 e
->unlock("storeLateRelease");
1197 eventAdd("storeLateRelease", storeLateRelease
, nullptr, 0.0, 1);
1200 /// whether the base response has all the body bytes we expect
1201 /// \returns true for responses with unknown/unspecified body length
1202 /// \returns true for responses with the right number of accumulated body bytes
1204 StoreEntry::validLength() const
1207 assert(mem_obj
!= nullptr);
1208 const auto reply
= &mem_obj
->baseReply();
1209 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1210 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1212 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply
->hdr_sz
);
1213 debugs(20, 5, "storeEntryValidLength: content_length = " << reply
->content_length
);
1215 if (reply
->content_length
< 0) {
1216 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1220 if (reply
->hdr_sz
== 0) {
1221 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1225 if (mem_obj
->method
== Http::METHOD_HEAD
) {
1226 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1230 if (reply
->sline
.status() == Http::scNotModified
)
1233 if (reply
->sline
.status() == Http::scNoContent
)
1236 diff
= reply
->hdr_sz
+ reply
->content_length
- objectLen();
1241 debugs(20, 3, "storeEntryValidLength: " << (diff
< 0 ? -diff
: diff
) << " bytes too " << (diff
< 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1247 storeRegisterWithCacheManager(void)
1249 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats
, 0, 1);
1250 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create
, 0, 1);
1251 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1252 storeCheckCachableStats
, 0, 1);
1253 Mgr::RegisterAction("store_queues", "SMP Transients and Caching Queues", StatQueues
, 0, 1);
1259 mem_policy
= createRemovalPolicy(Config
.memPolicy
);
1262 eventAdd("storeLateRelease", storeLateRelease
, nullptr, 1.0, 1);
1263 Store::Root().init();
1264 storeRebuildStart();
1266 storeRegisterWithCacheManager();
// Applies (re)configuration to the Store hierarchy via the root controller.
1270 storeConfigure(void)
1272 Store::Root().configure();
1276 StoreEntry::memoryCachable()
1278 if (!checkCachable())
1282 return 0; // avoid heavy optional work during shutdown
1284 if (mem_obj
== nullptr)
1287 if (mem_obj
->data_hdr
.size() == 0)
1290 if (mem_obj
->inmem_lo
!= 0)
1293 if (!Config
.onoff
.memory_cache_first
&& swappedOut() && refcount
== 1)
// Whether this entry is a valid negatively-cached hit: it must carry
// ENTRY_NEGCACHED, not be expired, and be STORE_OK. The per-branch return
// values are on elided lines — confirm against upstream.
1300 StoreEntry::checkNegativeHit() const
1302 if (!EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1305 if (expires
<= squid_curtime
)
1308 if (store_status
!= STORE_OK
)
1315 * Set object for negative caching.
1316 * Preserves any expiry information given by the server.
1317 * In absence of proper expiry info it will set to expire immediately,
1318 * or with HTTP-violations enabled the configured negative-TTL is observed
1321 StoreEntry::negativeCache()
1323 // XXX: should make the default for expires 0 instead of -1
1324 // so we can distinguish "Expires: -1" from nothing.
1326 #if USE_HTTP_VIOLATIONS
1327 expires
= squid_curtime
+ Config
.negativeTtl
;
1329 expires
= squid_curtime
;
1331 if (expires
> squid_curtime
) {
1332 EBIT_SET(flags
, ENTRY_NEGCACHED
);
1333 debugs(20, 6, "expires = " << expires
<< " +" << (expires
-squid_curtime
) << ' ' << *this);
1338 expiresMoreThan(time_t expires
, time_t when
)
1340 if (expires
< 0) /* No Expires given */
1343 return (expires
> (squid_curtime
+ when
));
1347 StoreEntry::validToSend() const
1349 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
1352 if (EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1353 if (expires
<= squid_curtime
)
1356 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
1359 // now check that the entry has a cache backing or is collapsed
1360 if (hasDisk()) // backed by a disk cache
1363 if (swappingOut()) // will be backed by a disk cache
1366 if (!mem_obj
) // not backed by a memory cache and not collapsed
1369 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
1370 // disk cache backing that store_client constructor will assert. XXX: This
1371 // is wrong for range requests (that could feed off nibbled memory) and for
1372 // entries backed by the shared memory cache (that could, in theory, get
1373 // nibbled bytes from that cache, but there is no such "memoryIn" code).
1374 if (mem_obj
->inmem_lo
) // in memory cache, but got nibbled at
1377 // The following check is correct but useless at this position. TODO: Move
1378 // it up when the shared memory cache can either replenish locally nibbled
1379 // bytes or, better, does not use local RAM copy at all.
1380 // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
// StoreEntry::timestampsSet(): recompute this entry's timestamp, expires, and
// lastModified_ from the freshest cached reply, mimicking RFC 2616 section 13.2.3:
// clamp the server Date into [~24h ago, now], compensate with the Age header and
// measured peer response time, then derive the expiry. Returns false when nothing
// effectively changed (same served date, expiry, and last-modified).
// NOTE(review): lossy extraction — braces, the exp declaration, and the final
// `expires = exp; return true;` style tail are missing; reconcile with upstream.
1387 StoreEntry::timestampsSet()
1389 debugs(20, 7, *this << " had " << describeTimestamps());
1391 // TODO: Remove change-reducing "&" before the official commit.
1392 const auto reply
= &mem().freshestReply();
1394 time_t served_date
= reply
->date
;
1395 int age
= reply
->header
.getInt(Http::HdrType::AGE
);
1396 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1397 /* make sure that 0 <= served_date <= squid_curtime */
1399 if (served_date
< 0 || served_date
> squid_curtime
)
1400 served_date
= squid_curtime
;
1403 * If the returned Date: is more than 24 hours older than
1404 * the squid_curtime, then one of us needs to use NTP to set our
1405 * clock. We'll pretend that our clock is right.
1407 else if (served_date
< (squid_curtime
- 24 * 60 * 60) )
1408 served_date
= squid_curtime
;
1411 * Compensate with Age header if origin server clock is ahead
1412 * of us and there is a cache in between us and the origin
1413 * server. But DONT compensate if the age value is larger than
1414 * squid_curtime because it results in a negative served_date.
1416 if (age
> squid_curtime
- served_date
)
1417 if (squid_curtime
> age
)
1418 served_date
= squid_curtime
- age
;
1420 // compensate for Squid-to-server and server-to-Squid delays
1421 if (mem_obj
&& mem_obj
->request
) {
1422 struct timeval responseTime
;
1423 if (mem_obj
->request
->hier
.peerResponseTime(responseTime
))
1424 served_date
-= responseTime
.tv_sec
;
// Derive the candidate expiry: relative to served_date when both Expires and Date
// are usable, otherwise take the reply's Expires verbatim.
1428 if (reply
->expires
> 0 && reply
->date
> -1)
1429 exp
= served_date
+ (reply
->expires
- reply
->date
);
1431 exp
= reply
->expires
;
1433 if (timestamp
== served_date
&& expires
== exp
) {
1434 // if the reply lacks LMT, then we now know that our effective
1435 // LMT (i.e., timestamp) will stay the same, otherwise, old and
1436 // new modification times must match
1437 if (reply
->last_modified
< 0 || reply
->last_modified
== lastModified())
1438 return false; // nothing has changed
// Something changed: adopt the reply's Last-Modified and the recomputed timestamp.
1443 lastModified_
= reply
->last_modified
;
1445 timestamp
= served_date
;
1447 debugs(20, 5, *this << " has " << describeTimestamps());
// StoreEntry::updateOnNotModified(): merge headers from a 304 (Not Modified) reply
// entry (e304) into this cached entry. Rebuilds an updated reply via
// recreateOnNotModified(); rejects updates whose header prefix would exceed
// reply_header_max_size (throws TextException). Then refreshes timestamps and
// marks mem_obj->appliedUpdates. NOTE(review): lossy extraction — the early return
// after the `!timestampsSet() && !updatedReply` guard is missing; see upstream.
1452 StoreEntry::updateOnNotModified(const StoreEntry
&e304
)
1455 assert(e304
.mem_obj
);
1457 // update reply before calling timestampsSet() below
1458 const auto &oldReply
= mem_obj
->freshestReply();
1459 const auto updatedReply
= oldReply
.recreateOnNotModified(e304
.mem_obj
->baseReply());
1460 if (updatedReply
) { // HTTP 304 brought in new information
1461 if (updatedReply
->prefixLen() > Config
.maxReplyHeaderSize
) {
1462 throw TextException(ToSBuf("cannot update the cached response because its updated ",
1463 updatedReply
->prefixLen(), "-byte header would exceed ",
1464 Config
.maxReplyHeaderSize
, "-byte reply_header_max_size"), Here());
1466 mem_obj
->updateReply(*updatedReply
);
1468 // else continue to use the previous update, if any
1470 if (!timestampsSet() && !updatedReply
)
1473 // Keep the old mem_obj->vary_headers; see HttpHeader::skipUpdateHeader().
1475 debugs(20, 5, "updated basics in " << *this << " with " << e304
);
1476 mem_obj
->appliedUpdates
= true; // helps in triage; may already be true

// StoreEntry::registerAbortCallback(): store the (single) abort notification
// callback on mem_obj; asserts no callback is already registered.
1481 StoreEntry::registerAbortCallback(const AsyncCall::Pointer
&handler
)
1484 assert(!mem_obj
->abortCallback
);
1485 mem_obj
->abortCallback
= handler
;

// StoreEntry::unregisterAbortCallback(): cancel (with the given reason) and clear
// the registered abort callback, if any.
1489 StoreEntry::unregisterAbortCallback(const char *reason
)
1492 if (mem_obj
->abortCallback
) {
1493 mem_obj
->abortCallback
->cancel(reason
);
1494 mem_obj
->abortCallback
= nullptr;
// StoreEntry::dump(): emit every StoreEntry field at debug level `l` (section 20)
// for troubleshooting; purely diagnostic, no state changes.
1499 StoreEntry::dump(int l
) const
1501 debugs(20, l
, "StoreEntry->key: " << getMD5Text());
1502 debugs(20, l
, "StoreEntry->next: " << next
);
1503 debugs(20, l
, "StoreEntry->mem_obj: " << mem_obj
);
1504 debugs(20, l
, "StoreEntry->timestamp: " << timestamp
);
1505 debugs(20, l
, "StoreEntry->lastref: " << lastref
);
1506 debugs(20, l
, "StoreEntry->expires: " << expires
);
1507 debugs(20, l
, "StoreEntry->lastModified_: " << lastModified_
);
1508 debugs(20, l
, "StoreEntry->swap_file_sz: " << swap_file_sz
);
1509 debugs(20, l
, "StoreEntry->refcount: " << refcount
);
1510 debugs(20, l
, "StoreEntry->flags: " << storeEntryFlags(this));
1511 debugs(20, l
, "StoreEntry->swap_dirn: " << swap_dirn
);
1512 debugs(20, l
, "StoreEntry->swap_filen: " << swap_filen
);
1513 debugs(20, l
, "StoreEntry->lock_count: " << lock_count
);
1514 debugs(20, l
, "StoreEntry->mem_status: " << mem_status
);
1515 debugs(20, l
, "StoreEntry->ping_status: " << ping_status
);
1516 debugs(20, l
, "StoreEntry->store_status: " << store_status
);
1517 debugs(20, l
, "StoreEntry->swap_status: " << swap_status
);

1521 * NOTE, this function assumes only two mem states
// StoreEntry::setMemStatus(): transition between NOT_IN_MEMORY and IN_MEMORY,
// keeping the local memory replacement policy (mem_policy) and hot_obj_count in
// sync. With a shared memory cache enabled there is no local policy to update, so
// only the status field changes. ENTRY_SPECIAL entries are never tracked by policy.
// NOTE(review): lossy extraction — early returns and the decrement branch's
// `--hot_obj_count` are missing; reconcile with upstream.
1524 StoreEntry::setMemStatus(mem_status_t new_status
)
1526 if (new_status
== mem_status
)
1529 // are we using a shared memory cache?
1530 if (MemStore::Enabled()) {
1531 // This method was designed to update replacement policy, not to
1532 // actually purge something from the memory cache (TODO: rename?).
1533 // Shared memory cache does not have a policy that needs updates.
1534 mem_status
= new_status
;
1538 assert(mem_obj
!= nullptr);
1540 if (new_status
== IN_MEMORY
) {
1541 assert(mem_obj
->inmem_lo
== 0);
1543 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1544 debugs(20, 4, "not inserting special " << *this << " into policy");
1546 mem_policy
->Add(mem_policy
, this, &mem_obj
->repl
);
1547 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
1550 ++hot_obj_count
; // TODO: maintain for the shared hot cache as well
1552 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1553 debugs(20, 4, "not removing special " << *this << " from policy");
1555 mem_policy
->Remove(mem_policy
, this, &mem_obj
->repl
);
1556 debugs(20, 4, "removed " << *this);
1562 mem_status
= new_status
;
// StoreEntry::url(): the entry's store ID (URL) or a placeholder when no mem_obj.
1566 StoreEntry::url() const
1568 if (mem_obj
== nullptr)
1569 return "[null_mem_obj]";
1571 return mem_obj
->storeId();

// StoreEntry::createMemObject(): allocate a fresh, URI-less MemObject.
// NOTE(review): upstream asserts !mem_obj first; that line is missing here.
1575 StoreEntry::createMemObject()
1578 mem_obj
= new MemObject();

// StoreEntry::createMemObject(url, logUrl, method): delegates to ensureMemObject().
1582 StoreEntry::createMemObject(const char *aUrl
, const char *aLogUrl
, const HttpRequestMethod
&aMethod
)
1585 ensureMemObject(aUrl
, aLogUrl
, aMethod
);

// StoreEntry::ensureMemObject(): create mem_obj if needed and set its URIs/method.
// NOTE(review): the `if (!mem_obj)` guard around the allocation is missing from
// this extraction; reconcile with upstream.
1589 StoreEntry::ensureMemObject(const char *aUrl
, const char *aLogUrl
, const HttpRequestMethod
&aMethod
)
1592 mem_obj
= new MemObject();
1593 mem_obj
->setUris(aUrl
, aLogUrl
, aMethod
);

1596 /** disable sending content to the clients.
1598 * This just sets DELAY_SENDING.
1601 StoreEntry::buffer()
1603 EBIT_SET(flags
, DELAY_SENDING
);

1606 /** flush any buffered content.
1608 * This just clears DELAY_SENDING and Invokes the handlers
1609 * to begin sending anything that may be buffered.
1614 if (EBIT_TEST(flags
, DELAY_SENDING
)) {
1615 EBIT_CLR(flags
, DELAY_SENDING
);

// StoreEntry::reset() fragment: clears the reply metadata timestamps.
// NOTE(review): the signature and the mem_obj->reset() call visible upstream are
// missing from this extraction.
1623 debugs(20, 3, url());
1625 expires
= lastModified_
= timestamp
= -1;

1631 * This routine calls the SETUP routine for each fs type.
1632 * I don't know where the best place for this is, and I'm not going to shuffle
1633 * around large chunks of code right now (that can be done once its working.)
1642 * called to add another store removal policy module
// storeReplAdd(): append a removal-policy factory to the NULL-terminated
// storerepl_list array. Scans for duplicates first (warns and bails on a repeat
// registration), then xrealloc()s the array by one slot, zero-fills the new
// terminator slot, and records the type string and factory function.
// NOTE(review): `int i;` declaration and the early return on duplicate are
// missing from this extraction.
1645 storeReplAdd(const char *type
, REMOVALPOLICYCREATE
* create
)
1649 /* find the number of currently known repl types */
1650 for (i
= 0; storerepl_list
&& storerepl_list
[i
].typestr
; ++i
) {
1651 if (strcmp(storerepl_list
[i
].typestr
, type
) == 0) {
1652 debugs(20, DBG_IMPORTANT
, "WARNING: Trying to load store replacement policy " << type
<< " twice.");
1657 /* add the new type */
1658 storerepl_list
= static_cast<storerepl_entry_t
*>(xrealloc(storerepl_list
, (i
+ 2) * sizeof(storerepl_entry_t
)));
1660 memset(&storerepl_list
[i
+ 1], 0, sizeof(storerepl_entry_t
));
1662 storerepl_list
[i
].typestr
= type
;
1664 storerepl_list
[i
].create
= create
;

1668 * Create a removal policy instance
// createRemovalPolicy(): look up the policy factory matching settings->type in
// storerepl_list and invoke it with settings->args. Unknown types are a fatal
// configuration error (cache_replacement_policy / memory_replacement_policy).
1671 createRemovalPolicy(RemovalPolicySettings
* settings
)
1673 storerepl_entry_t
*r
;
1675 for (r
= storerepl_list
; r
&& r
->typestr
; ++r
) {
1676 if (strcmp(r
->typestr
, settings
->type
) == 0)
1677 return r
->create(settings
->args
);
1680 debugs(20, DBG_IMPORTANT
, "ERROR: Unknown policy " << settings
->type
);
1681 debugs(20, DBG_IMPORTANT
, "ERROR: Be sure to have set cache_replacement_policy");
1682 debugs(20, DBG_IMPORTANT
, "ERROR: and memory_replacement_policy in squid.conf!");
1683 fatalf("ERROR: Unknown policy %s\n", settings
->type
);
1684 return nullptr; /* NOTREACHED */
// StoreEntry::storeErrorResponse(): store a complete error reply into this entry
// under a temporary lock, mark it complete, and request release (the entry may
// still be shared if negative caching deems it safe). Takes ownership of `reply`
// via HttpReplyPointer.
1688 StoreEntry::storeErrorResponse(HttpReply
*reply
)
1690 lock("StoreEntry::storeErrorResponse");
1692 replaceHttpReply(HttpReplyPointer(reply
));
1694 completeSuccessfully("replaceHttpReply() stored the entire error");
1696 releaseRequest(false); // if it is safe to negatively cache, sharing is OK
1697 unlock("StoreEntry::storeErrorResponse");

1701 * Replace a store entry with
1702 * a new reply. This eats the reply.
// replaceHttpReply(): install `rep` as the base reply on mem_obj; optionally
// begin writing it out (andStartWriting). Logs critically and bails when there is
// no in-memory representation. NOTE(review): the `if (!mem_obj)` guard and the
// startWriting() call body are missing from this extraction.
1705 StoreEntry::replaceHttpReply(const HttpReplyPointer
&rep
, const bool andStartWriting
)
1707 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1710 debugs(20, DBG_CRITICAL
, "Attempt to replace object with no in-memory representation");
1714 mem_obj
->replaceBaseReply(rep
);
1716 if (andStartWriting
)

// startWriting(): serialize the base reply headers (slow packer) into the store,
// mark the end-of-headers offset, clear the collapsing requirement, then pack any
// reply body. Only the base reply may be written (asserts no updated reply).
1721 StoreEntry::startWriting()
1723 /* TODO: when we store headers separately remove the header portion */
1724 /* TODO: mark the length of the headers ? */
1725 /* We ONLY want the headers */
1729 // Per MemObject replies definitions, we can only write our base reply.
1730 // Currently, all callers replaceHttpReply() first, so there is no updated
1731 // reply here anyway. Eventually, we may need to support the
1732 // updateOnNotModified(),startWriting() sequence as well.
1733 assert(!mem_obj
->updatedReply());
1734 const auto rep
= &mem_obj
->baseReply();
1737 rep
->packHeadersUsingSlowPacker(*this);
1738 mem_obj
->markEndOfReplyHeaders();
1740 // Same-worker collapsing risks end with the receipt of the headers.
1741 // SMP collapsing risks remain until the headers are actually cached, but
1742 // that event is announced via CF-agnostic Store writing broadcasts.
1743 setCollapsingRequirement(false);
1745 rep
->body
.packInto(this);

// getSerialisedMetaData(): pack this entry's swap metadata; transfers ownership
// of the malloc'ed buffer to the caller (release()), reporting its size in `length`.
1750 StoreEntry::getSerialisedMetaData(size_t &length
) const
1752 return static_cast<const char *>(Store::PackSwapMeta(*this, length
).release());
1756 * If needed, signal transient entry readers that no more cache changes are
1757 * expected and, hence, they should switch to Plan B instead of getting stuck
1758 * waiting for us to start or finish storing the entry.
// storeWritingCheckpoint(): for SMP-shared (transient) entries written by this
// worker, announce via noteStoppedSharedWriting() once BOTH the shared-memory
// caching attempt is done AND the disk swapout is either impossible or finished.
1761 StoreEntry::storeWritingCheckpoint()
1763 if (!hasTransients())
1764 return; // no SMP complications
1766 // writers become readers but only after completeWriting() which we trigger
1767 if (Store::Root().transientsReader(*this))
1768 return; // readers do not need to inform
1771 if (mem_obj
->memCache
.io
!= Store::ioDone
) {
1772 debugs(20, 7, "not done with mem-caching " << *this);
1776 const auto doneWithDiskCache
=
// disk caching is done when it was never going to happen...
1778 (mem_obj
->swapout
.decision
== MemObject::SwapOut::swImpossible
) ||
1779 // or has started but finished already
1780 (mem_obj
->swapout
.decision
== MemObject::SwapOut::swStarted
&& !swappingOut());
1781 if (!doneWithDiskCache
) {
1782 debugs(20, 7, "not done with disk-caching " << *this);
1786 debugs(20, 7, "done with writing " << *this);
1787 Store::Root().noteStoppedSharedWriting(*this);

// memOutDecision(): called when the RAM-caching decision is made; a negative
// decision may complete the writing checkpoint, a positive one defers to
// storeWriterDone().
1791 StoreEntry::memOutDecision(const bool willCacheInRam
)
1793 if (!willCacheInRam
)
1794 return storeWritingCheckpoint();
1795 assert(mem_obj
->memCache
.io
!= Store::ioDone
);
1796 // and wait for storeWriterDone()

// swapOutDecision(): record the disk swapout decision and re-run the checkpoint.
1800 StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision
&decision
)
1803 mem_obj
->swapout
.decision
= decision
;
1804 storeWritingCheckpoint();

// storeWriterDone(): a cache writer finished; re-evaluate the checkpoint.
1808 StoreEntry::storeWriterDone()
1810 storeWritingCheckpoint();
// trimMemory(): release in-memory object data we no longer need, except for
// fully-in-memory and ENTRY_SPECIAL objects. preserveSwappable selects between
// trimming only what is safe for a future swapout vs. trimming freely.
1814 StoreEntry::trimMemory(const bool preserveSwappable
)
1818 * Bug #1943. We must not let go any data for IN_MEMORY
1819 * objects. We have to wait until the mem_status changes.
1821 if (mem_status
== IN_MEMORY
)
1824 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1825 return; // cannot trim because we do not load them again
1827 if (preserveSwappable
)
1828 mem_obj
->trimSwappable();
1830 mem_obj
->trimUnSwappable();
1832 debugs(88, 7, *this << " inmem_lo=" << mem_obj
->inmem_lo
);

// modifiedSince(): If-Modified-Since check against the entry's Last-Modified.
// Only the strict comparison survives in this extraction (imslen must be unset);
// newer-than-client => modified (YES), older or equal => not modified (NO).
// NOTE(review): return statements are missing from the extraction.
1836 StoreEntry::modifiedSince(const time_t ims
, const int imslen
) const
1838 const time_t mod_time
= lastModified();
1840 debugs(88, 3, "modifiedSince: '" << url() << "'");
1842 debugs(88, 3, "modifiedSince: mod_time = " << mod_time
);
1847 assert(imslen
< 0); // TODO: Either remove imslen or support it properly.
1849 if (mod_time
> ims
) {
1850 debugs(88, 3, "--> YES: entry newer than client");
1852 } else if (mod_time
< ims
) {
1853 debugs(88, 3, "--> NO: entry older than client");
1856 debugs(88, 3, "--> NO: same LMT");

// hasEtag(): copy the freshest reply's ETag header into `etag` when present.
1862 StoreEntry::hasEtag(ETag
&etag
) const
1864 if (const auto reply
= hasFreshestReply()) {
1865 etag
= reply
->header
.getETag(Http::HdrType::ETAG
);

// hasIfMatchEtag(): If-Match requires a strong ETag comparison (weak disallowed).
1873 StoreEntry::hasIfMatchEtag(const HttpRequest
&request
) const
1875 const String reqETags
= request
.header
.getList(Http::HdrType::IF_MATCH
);
1876 return hasOneOfEtags(reqETags
, false);

// hasIfNoneMatchEtag(): If-None-Match allows weak comparison only for HEAD or
// full-body (non-ranged) GET requests.
1880 StoreEntry::hasIfNoneMatchEtag(const HttpRequest
&request
) const
1882 const String reqETags
= request
.header
.getList(Http::HdrType::IF_NONE_MATCH
);
1883 // weak comparison is allowed only for HEAD or full-body GET requests
1884 const bool allowWeakMatch
= !request
.flags
.isRanged
&&
1885 (request
.method
== Http::METHOD_GET
|| request
.method
== Http::METHOD_HEAD
);
1886 return hasOneOfEtags(reqETags
, allowWeakMatch
);
1889 /// whether at least one of the request ETags matches entity ETag
// hasOneOfEtags(): "*" in the request list matches any entity ETag; otherwise
// walk the comma-separated list, parse each item, and compare weakly or strongly
// per allowWeakMatch. NOTE(review): lossy extraction — the no-reply-ETag early
// path, `const char *item; int ilen; String str; ETag reqETag;` declarations, and
// the final `return matched;` are missing; reconcile with upstream.
1891 StoreEntry::hasOneOfEtags(const String
&reqETags
, const bool allowWeakMatch
) const
1893 const auto repETag
= mem().freshestReply().header
.getETag(Http::HdrType::ETAG
);
1895 static SBuf
asterisk("*", 1);
1896 return strListIsMember(&reqETags
, asterisk
, ',');
1899 bool matched
= false;
1900 const char *pos
= nullptr;
1903 while (!matched
&& strListGetItem(&reqETags
, ',', &item
, &ilen
, &pos
)) {
1904 if (!strncmp(item
, "*", ilen
))
1908 str
.append(item
, ilen
);
1910 if (etagParseInit(&reqETag
, str
.termedBuf())) {
1911 matched
= allowWeakMatch
? etagIsWeakEqual(repETag
, reqETag
) :
1912 etagIsStrongEqual(repETag
, reqETag
);

// disk(): the Store::Disk (cache_dir) object this entry is attached to, via the
// swap_dirn index. NOTE(review): the dereference/assert and return are missing.
1920 StoreEntry::disk() const
1923 const RefCount
<Store::Disk
> &sd
= INDEXSD(swap_dirn
);

// hasDisk(): with no arguments (both negative), true when attached to any disk;
// with a dirn, matches the directory, and optionally the file number too.
1929 StoreEntry::hasDisk(const sdirno dirn
, const sfileno filen
) const
1932 if (dirn
< 0 && filen
< 0)
1933 return swap_dirn
>= 0;
1935 const bool matchingDisk
= (swap_dirn
== dirn
);
1936 return filen
< 0 ? matchingDisk
: (matchingDisk
&& swap_filen
== filen
);

// attachToDisk(): record the disk location (dirn/fno) and swap status of this
// entry. NOTE(review): the swap_dirn/swap_filen assignments and checkDisk() call
// are missing from this extraction.
1940 StoreEntry::attachToDisk(const sdirno dirn
, const sfileno fno
, const swap_status_t status
)
1942 debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
1943 swapStatusStr
[status
] << " " << dirn
<< " " <<
1944 asHex(fno
).upperCase().minDigits(8));
1948 swap_status
= status
;

// detachFromDisk(): reset the entry to the not-on-disk state.
1953 StoreEntry::detachFromDisk()
1957 swap_status
= SWAPOUT_NONE
;
// checkDisk(): sanity-check the disk-related invariants. Detached entries must
// have no file and SWAPOUT_NONE; attached entries need a valid file number and a
// configured directory index, with failed swapouts implying RELEASE_REQUEST.
// Violations are reported via a Must() exception caught and logged here.
// NOTE(review): the surrounding try/catch structure is missing from this extraction.
1961 StoreEntry::checkDisk() const
1964 if (swap_dirn
< 0) {
1965 Must(swap_filen
< 0);
1966 Must(swap_status
== SWAPOUT_NONE
);
1968 Must(swap_filen
>= 0);
1969 Must(swap_dirn
< Config
.cacheSwap
.n_configured
);
1970 if (swapoutFailed()) {
1971 Must(EBIT_TEST(flags
, RELEASE_REQUEST
));
1973 Must(swappingOut() || swappedOut());
1977 debugs(88, DBG_IMPORTANT
, "ERROR: inconsistent disk entry state " <<
1978 *this << "; problem: " << CurrentException
);

1984 * return true if the entry is in a state where
1985 * it can accept more data (ie with write() method)
// isAccepting(): requires STORE_PENDING status and no ENTRY_ABORTED flag.
// NOTE(review): return statements missing from this extraction.
1988 StoreEntry::isAccepting() const
1990 if (STORE_PENDING
!= store_status
)
1993 if (EBIT_TEST(flags
, ENTRY_ABORTED
))

// describeTimestamps(): format last-validated/last-used/last-modified/expires
// into a static (LOCAL_ARRAY) buffer for debug output; not reentrant.
2000 StoreEntry::describeTimestamps() const
2002 LOCAL_ARRAY(char, buf
, 256);
2003 snprintf(buf
, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
2004 static_cast<int>(timestamp
),
2005 static_cast<int>(lastref
),
2006 static_cast<int>(lastModified_
),
2007 static_cast<int>(expires
));

// setCollapsingRequirement(): toggle ENTRY_REQUIRES_COLLAPSING, logging only on
// actual change. NOTE(review): the if/else around SET vs CLR is implied by the
// `required` flag but its tokens are missing from this extraction.
2012 StoreEntry::setCollapsingRequirement(const bool required
)
2014 if (hittingRequiresCollapsing() == required
)
2015 return; // no change
2017 debugs(20, 5, (required
? "adding to " : "removing from ") << *this);
2019 EBIT_SET(flags
, ENTRY_REQUIRES_COLLAPSING
);
2021 EBIT_CLR(flags
, ENTRY_REQUIRES_COLLAPSING
);
// operator<<(Store::IoStatus): print a one-character code per I/O state for the
// compact StoreEntry debug dump below. NOTE(review): the switch header, the
// os << ... bodies of each case, and the return are missing from this extraction.
2024 static std::ostream
&
2025 operator <<(std::ostream
&os
, const Store::IoStatus
&io
)
2028 case Store::ioUndecided
:
2031 case Store::ioReading
:
2034 case Store::ioWriting
:

// operator<<(StoreEntry): compact one-line diagnostic rendering of an entry:
// transients slot (t...), shared memory cache slot (m...), disk location (d...),
// non-default status letters, set-flag letters, then address and lock count.
2044 std::ostream
&operator <<(std::ostream
&os
, const StoreEntry
&e
)
2048 if (e
.hasTransients()) {
2049 const auto &xitTable
= e
.mem_obj
->xitTable
;
2050 os
<< 't' << xitTable
.io
<< xitTable
.index
;
2053 if (e
.hasMemStore()) {
2054 const auto &memCache
= e
.mem_obj
->memCache
;
2055 os
<< 'm' << memCache
.io
<< memCache
.index
<< '@' << memCache
.offset
;
2058 // Do not use e.hasDisk() here because its checkDisk() call may calls us.
2059 if (e
.swap_filen
> -1 || e
.swap_dirn
> -1)
2060 os
<< 'd' << e
.swap_filen
<< '@' << e
.swap_dirn
;
2064 // print only non-default status values, using unique letters
2065 if (e
.mem_status
!= NOT_IN_MEMORY
||
2066 e
.store_status
!= STORE_PENDING
||
2067 e
.swap_status
!= SWAPOUT_NONE
||
2068 e
.ping_status
!= PING_NONE
) {
2069 if (e
.mem_status
!= NOT_IN_MEMORY
) os
<< 'm';
2070 if (e
.store_status
!= STORE_PENDING
) os
<< 's';
2071 if (e
.swap_status
!= SWAPOUT_NONE
) os
<< 'w' << e
.swap_status
;
2072 if (e
.ping_status
!= PING_NONE
) os
<< 'p' << e
.ping_status
;
2075 // print only set flags, using unique letters
2077 if (EBIT_TEST(e
.flags
, ENTRY_SPECIAL
)) os
<< 'S';
2078 if (EBIT_TEST(e
.flags
, ENTRY_REVALIDATE_ALWAYS
)) os
<< 'R';
2079 if (EBIT_TEST(e
.flags
, DELAY_SENDING
)) os
<< 'P';
2080 if (EBIT_TEST(e
.flags
, RELEASE_REQUEST
)) os
<< 'X';
2081 if (EBIT_TEST(e
.flags
, REFRESH_REQUEST
)) os
<< 'F';
2082 if (EBIT_TEST(e
.flags
, ENTRY_REVALIDATE_STALE
)) os
<< 'E';
// KEY_PRIVATE prints a distinct letter depending on shareability.
// NOTE(review): the os << output tokens for this branch are missing here.
2083 if (EBIT_TEST(e
.flags
, KEY_PRIVATE
)) {
2085 if (e
.shareableWhenPrivate
)
2088 if (EBIT_TEST(e
.flags
, ENTRY_FWD_HDR_WAIT
)) os
<< 'W';
2089 if (EBIT_TEST(e
.flags
, ENTRY_NEGCACHED
)) os
<< 'N';
2090 if (EBIT_TEST(e
.flags
, ENTRY_VALIDATED
)) os
<< 'V';
2091 if (EBIT_TEST(e
.flags
, ENTRY_BAD_LENGTH
)) os
<< 'L';
2092 if (EBIT_TEST(e
.flags
, ENTRY_ABORTED
)) os
<< 'A';
2093 if (EBIT_TEST(e
.flags
, ENTRY_REQUIRES_COLLAPSING
)) os
<< 'C';
2096 return os
<< '/' << &e
<< '*' << e
.locks();
2100 Store::EntryGuard::onException() noexcept
2102 SWALLOW_EXCEPTIONS({
2103 entry_
->releaseRequest(false);
2104 entry_
->unlock(context_
);