2 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 20 Storage Manager */
12 #include "CacheDigest.h"
13 #include "CacheManager.h"
14 #include "comm/Connection.h"
15 #include "comm/Read.h"
21 #include "HttpReply.h"
22 #include "HttpRequest.h"
24 #include "MemObject.h"
25 #include "mgr/Registration.h"
26 #include "mgr/StoreIoAction.h"
27 #include "profiler/Profiler.h"
28 #include "repl_modules.h"
29 #include "RequestFlags.h"
30 #include "SquidConfig.h"
31 #include "SquidTime.h"
32 #include "StatCounters.h"
35 #include "store_digest.h"
36 #include "store_key_md5.h"
37 #include "store_key_md5.h"
38 #include "store_log.h"
39 #include "store_rebuild.h"
40 #include "StoreClient.h"
41 #include "StoreIOState.h"
42 #include "StoreMeta.h"
44 #include "swap_log_op.h"
48 #include "DelayPools.h"
51 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
52 * XXX: convert to MEMPROXY_CLASS() API
59 #define REBUILD_TIMESTAMP_DELTA_MAX 2
61 #define STORE_IN_MEM_BUCKETS (229)
63 /** \todo Convert these string constants to enum string-arrays generated */
65 const char *memStatusStr
[] = {
70 const char *pingStatusStr
[] = {
76 const char *storeStatusStr
[] = {
81 const char *swapStatusStr
[] = {
88 * This defines an repl type
91 typedef struct _storerepl_entry storerepl_entry_t
;
93 struct _storerepl_entry
{
95 REMOVALPOLICYCREATE
*create
;
98 static storerepl_entry_t
*storerepl_list
= NULL
;
101 * local function prototypes
103 static int getKeyCounter(void);
104 static OBJH storeCheckCachableStats
;
105 static EVH storeLateRelease
;
110 static std::stack
<StoreEntry
*> LateReleaseStack
;
111 MemAllocator
*StoreEntry::pool
= NULL
;
113 StorePointer
Store::CurrentRoot
= NULL
;
116 Store::Root(Store
* aRoot
)
122 Store::Root(StorePointer aRoot
)
124 Root(aRoot
.getRaw());
128 Store::Stats(StoreEntry
* output
)
131 Root().stat(*output
);
147 Store::unlink (StoreEntry
&anEntry
)
149 fatal("Store::unlink on invalid Store\n");
153 StoreEntry::operator new (size_t bytecount
)
155 assert (bytecount
== sizeof (StoreEntry
));
158 pool
= memPoolCreate ("StoreEntry", bytecount
);
159 pool
->setChunkSize(2048 * 1024);
162 return pool
->alloc();
166 StoreEntry::operator delete (void *address
)
168 pool
->freeOne(address
);
172 StoreEntry::makePublic()
174 /* This object can be cached for a long time */
176 if (!EBIT_TEST(flags
, RELEASE_REQUEST
))
181 StoreEntry::makePrivate()
183 /* This object should never be cached at all */
185 releaseRequest(); /* delete object when not used */
189 StoreEntry::cacheNegatively()
191 /* This object may be negatively cached */
197 StoreEntry::inUseCount()
201 return pool
->getInUseCount();
205 StoreEntry::getMD5Text() const
207 return storeKeyText((const cache_key
*)key
);
213 StoreEntry::DeferReader(void *theContext
, CommRead
const &aRead
)
215 StoreEntry
*anEntry
= (StoreEntry
*)theContext
;
216 anEntry
->delayAwareRead(aRead
.conn
,
223 StoreEntry::delayAwareRead(const Comm::ConnectionPointer
&conn
, char *buf
, int len
, AsyncCall::Pointer callback
)
225 size_t amountToRead
= bytesWanted(Range
<size_t>(0, len
));
226 /* sketch: readdeferer* = getdeferer.
227 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
230 if (amountToRead
== 0) {
232 /* read ahead limit */
233 /* Perhaps these two calls should both live in MemObject */
235 if (!mem_obj
->readAheadPolicyCanRead()) {
237 mem_obj
->delayRead(DeferredRead(DeferReader
, this, CommRead(conn
, buf
, len
, callback
)));
243 mem_obj
->mostBytesAllowed().delayRead(DeferredRead(DeferReader
, this, CommRead(conn
, buf
, len
, callback
)));
250 if (fd_table
[conn
->fd
].closing()) {
251 // Readers must have closing callbacks if they want to be notified. No
252 // readers appeared to care around 2009/12/14 as they skipped reading
253 // for other reasons. Closing may already be true at the delyaAwareRead
254 // call time or may happen while we wait after delayRead() above.
255 debugs(20, 3, HERE
<< "wont read from closing " << conn
<< " for " <<
257 return; // the read callback will never be called
260 comm_read(conn
, buf
, amountToRead
, callback
);
264 StoreEntry::bytesWanted (Range
<size_t> const aRange
, bool ignoreDelayPools
) const
269 #if URL_CHECKSUM_DEBUG
271 mem_obj
->checkUrlChecksum();
275 if (!mem_obj
->readAheadPolicyCanRead())
278 return mem_obj
->mostBytesWanted(aRange
.end
, ignoreDelayPools
);
282 StoreEntry::checkDeferRead(int fd
) const
284 return (bytesWanted(Range
<size_t>(0,INT_MAX
)) == 0);
288 StoreEntry::setNoDelay (bool const newValue
)
291 mem_obj
->setNoDelay(newValue
);
294 // XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
295 // open swapin file, aggressively trim memory, and ignore read-ahead gap.
296 // It does not mean we will read from disk exclusively (or at all!).
297 // XXX: May create STORE_DISK_CLIENT with no disk caching configured.
298 // XXX: Collapsed clients cannot predict their type.
300 StoreEntry::storeClientType() const
302 /* The needed offset isn't in memory
303 * XXX TODO: this is wrong for range requests
304 * as the needed offset may *not* be 0, AND
305 * offset 0 in the memory object is the HTTP headers.
310 if (mem_obj
->inmem_lo
)
311 return STORE_DISK_CLIENT
;
313 if (EBIT_TEST(flags
, ENTRY_ABORTED
)) {
314 /* I don't think we should be adding clients to aborted entries */
315 debugs(20, DBG_IMPORTANT
, "storeClientType: adding to ENTRY_ABORTED entry");
316 return STORE_MEM_CLIENT
;
319 if (store_status
== STORE_OK
) {
320 /* the object has completed. */
322 if (mem_obj
->inmem_lo
== 0 && !isEmpty()) {
323 if (swap_status
== SWAPOUT_DONE
) {
324 debugs(20,7, HERE
<< mem_obj
<< " lo: " << mem_obj
->inmem_lo
<< " hi: " << mem_obj
->endOffset() << " size: " << mem_obj
->object_sz
);
325 if (mem_obj
->endOffset() == mem_obj
->object_sz
) {
326 /* hot object fully swapped in (XXX: or swapped out?) */
327 return STORE_MEM_CLIENT
;
330 /* Memory-only, or currently being swapped out */
331 return STORE_MEM_CLIENT
;
334 return STORE_DISK_CLIENT
;
337 /* here and past, entry is STORE_PENDING */
339 * If this is the first client, let it be the mem client
341 if (mem_obj
->nclients
== 1)
342 return STORE_MEM_CLIENT
;
345 * If there is no disk file to open yet, we must make this a
346 * mem client. If we can't open the swapin file before writing
347 * to the client, there is no guarantee that we will be able
348 * to open it later when we really need it.
350 if (swap_status
== SWAPOUT_NONE
)
351 return STORE_MEM_CLIENT
;
354 * otherwise, make subsequent clients read from disk so they
355 * can not delay the first, and vice-versa.
357 return STORE_DISK_CLIENT
;
360 StoreEntry::StoreEntry() :
371 mem_status(NOT_IN_MEMORY
),
372 ping_status(PING_NONE
),
373 store_status(STORE_PENDING
),
374 swap_status(SWAPOUT_NONE
),
377 debugs(20, 5, "StoreEntry constructed, this=" << this);
380 StoreEntry::~StoreEntry()
382 debugs(20, 5, "StoreEntry destructed, this=" << this);
387 StoreEntry::deferProducer(const AsyncCall::Pointer
&producer
)
389 if (!deferredProducer
)
390 deferredProducer
= producer
;
392 debugs(20, 5, HERE
<< "Deferred producer call is allready set to: " <<
393 *deferredProducer
<< ", requested call: " << *producer
);
397 StoreEntry::kickProducer()
399 if (deferredProducer
!= NULL
) {
400 ScheduleCallHere(deferredProducer
);
401 deferredProducer
= NULL
;
407 StoreEntry::destroyMemObject()
409 debugs(20, 3, HERE
<< "destroyMemObject " << mem_obj
);
411 if (MemObject
*mem
= mem_obj
) {
412 // Store::Root() is FATALly missing during shutdown
413 if (mem
->xitTable
.index
>= 0 && !shutting_down
)
414 Store::Root().transientsDisconnect(*mem
);
415 if (mem
->memCache
.index
>= 0 && !shutting_down
)
416 Store::Root().memoryDisconnect(*this);
418 setMemStatus(NOT_IN_MEMORY
);
425 destroyStoreEntry(void *data
)
427 debugs(20, 3, HERE
<< "destroyStoreEntry: destroying " << data
);
428 StoreEntry
*e
= static_cast<StoreEntry
*>(static_cast<hash_link
*>(data
));
431 if (e
== NullStoreEntry::getInstance())
434 // Store::Root() is FATALly missing during shutdown
435 if (e
->swap_filen
>= 0 && !shutting_down
) {
436 SwapDir
&sd
= dynamic_cast<SwapDir
&>(*e
->store());
440 e
->destroyMemObject();
444 assert(e
->key
== NULL
);
449 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
452 StoreEntry::hashInsert(const cache_key
* someKey
)
454 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey
) << "'");
455 key
= storeKeyDup(someKey
);
456 hash_join(store_table
, this);
460 StoreEntry::hashDelete()
462 if (key
) { // some test cases do not create keys and do not hashInsert()
463 hash_remove_link(store_table
, this);
464 storeKeyFree((const cache_key
*)key
);
469 /* -------------------------------------------------------------------------- */
471 /* get rid of memory copy of the object */
473 StoreEntry::purgeMem()
478 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
480 Store::Root().memoryUnlink(*this);
482 if (swap_status
!= SWAPOUT_DONE
)
487 StoreEntry::lock(const char *context
)
490 debugs(20, 3, context
<< " locked key " << getMD5Text() << ' ' << *this);
496 lastref
= squid_curtime
;
497 Store::Root().reference(*this);
501 StoreEntry::setReleaseFlag()
503 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
506 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
508 EBIT_SET(flags
, RELEASE_REQUEST
);
510 Store::Root().markForUnlink(*this);
514 StoreEntry::releaseRequest()
516 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
519 setReleaseFlag(); // makes validToSend() false, preventing future hits
525 StoreEntry::unlock(const char *context
)
527 debugs(20, 3, (context
? context
: "somebody") <<
528 " unlocking key " << getMD5Text() << ' ' << *this);
529 assert(lock_count
> 0);
533 return (int) lock_count
;
535 if (store_status
== STORE_PENDING
)
538 assert(storePendingNClients(this) == 0);
540 if (EBIT_TEST(flags
, RELEASE_REQUEST
)) {
545 if (EBIT_TEST(flags
, KEY_PRIVATE
))
546 debugs(20, DBG_IMPORTANT
, "WARNING: " << __FILE__
<< ":" << __LINE__
<< ": found KEY_PRIVATE");
548 Store::Root().handleIdleEntry(*this); // may delete us
553 StoreEntry::getPublicByRequestMethod (StoreClient
*aClient
, HttpRequest
* request
, const HttpRequestMethod
& method
)
556 StoreEntry
*result
= storeGetPublicByRequestMethod( request
, method
);
559 aClient
->created (NullStoreEntry::getInstance());
561 aClient
->created (result
);
565 StoreEntry::getPublicByRequest (StoreClient
*aClient
, HttpRequest
* request
)
568 StoreEntry
*result
= storeGetPublicByRequest (request
);
571 result
= NullStoreEntry::getInstance();
573 aClient
->created (result
);
577 StoreEntry::getPublic (StoreClient
*aClient
, const char *uri
, const HttpRequestMethod
& method
)
580 StoreEntry
*result
= storeGetPublic (uri
, method
);
583 result
= NullStoreEntry::getInstance();
585 aClient
->created (result
);
589 storeGetPublic(const char *uri
, const HttpRequestMethod
& method
)
591 return Store::Root().get(storeKeyPublic(uri
, method
));
595 storeGetPublicByRequestMethod(HttpRequest
* req
, const HttpRequestMethod
& method
)
597 return Store::Root().get(storeKeyPublicByRequestMethod(req
, method
));
601 storeGetPublicByRequest(HttpRequest
* req
)
603 StoreEntry
*e
= storeGetPublicByRequestMethod(req
, req
->method
);
605 if (e
== NULL
&& req
->method
== Http::METHOD_HEAD
)
606 /* We can generate a HEAD reply from a cached GET object */
607 e
= storeGetPublicByRequestMethod(req
, Http::METHOD_GET
);
615 static int key_counter
= 0;
617 if (++key_counter
< 0)
623 /* RBC 20050104 AFAICT this should become simpler:
624 * rather than reinserting with a special key it should be marked
625 * as 'released' and then cleaned up when refcounting indicates.
626 * the StoreHashIndex could well implement its 'released' in the
628 * Also, clean log writing should skip over ia,t
629 * Otherwise, we need a 'remove from the index but not the store
633 StoreEntry::setPrivateKey()
635 const cache_key
*newkey
;
637 if (key
&& EBIT_TEST(flags
, KEY_PRIVATE
))
638 return; /* is already private */
641 setReleaseFlag(); // will markForUnlink(); all caches/workers will know
643 // TODO: move into SwapDir::markForUnlink() already called by Root()
645 storeDirSwapLog(this, SWAP_LOG_DEL
);
650 if (mem_obj
&& mem_obj
->hasUris()) {
651 mem_obj
->id
= getKeyCounter();
652 newkey
= storeKeyPrivate(mem_obj
->storeId(), mem_obj
->method
, mem_obj
->id
);
654 newkey
= storeKeyPrivate("JUNK", Http::METHOD_NONE
, getKeyCounter());
657 assert(hash_lookup(store_table
, newkey
) == NULL
);
658 EBIT_SET(flags
, KEY_PRIVATE
);
663 StoreEntry::setPublicKey()
665 const cache_key
*newkey
;
667 if (key
&& !EBIT_TEST(flags
, KEY_PRIVATE
))
668 return; /* is already public */
673 * We can't make RELEASE_REQUEST objects public. Depending on
674 * when RELEASE_REQUEST gets set, we might not be swapping out
675 * the object. If we're not swapping out, then subsequent
676 * store clients won't be able to access object data which has
677 * been freed from memory.
679 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
681 #if MORE_DEBUG_OUTPUT
683 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
684 debugs(20, DBG_IMPORTANT
, "assertion failed: RELEASE key " << key
<< ", url " << mem_obj
->url
);
688 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
690 if (mem_obj
->request
) {
691 HttpRequest
*request
= mem_obj
->request
;
693 if (!mem_obj
->vary_headers
) {
694 /* First handle the case where the object no longer varies */
695 safe_free(request
->vary_headers
);
697 if (request
->vary_headers
&& strcmp(request
->vary_headers
, mem_obj
->vary_headers
) != 0) {
698 /* Oops.. the variance has changed. Kill the base object
699 * to record the new variance key
701 safe_free(request
->vary_headers
); /* free old "bad" variance key */
702 if (StoreEntry
*pe
= storeGetPublic(mem_obj
->storeId(), mem_obj
->method
))
706 /* Make sure the request knows the variance status */
707 if (!request
->vary_headers
) {
708 const char *vary
= httpMakeVaryMark(request
, mem_obj
->getReply());
711 request
->vary_headers
= xstrdup(vary
);
715 // TODO: storeGetPublic() calls below may create unlocked entries.
716 // We should add/use storeHas() API or lock/unlock those entries.
717 if (mem_obj
->vary_headers
&& !storeGetPublic(mem_obj
->storeId(), mem_obj
->method
)) {
718 /* Create "vary" base object */
720 StoreEntry
*pe
= storeCreateEntry(mem_obj
->storeId(), mem_obj
->logUri(), request
->flags
, request
->method
);
721 /* We are allowed to do this typecast */
722 HttpReply
*rep
= new HttpReply
;
723 rep
->setHeaders(Http::scOkay
, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime
+ 100000);
724 vary
= mem_obj
->getReply()->header
.getList(HDR_VARY
);
727 /* Again, we own this structure layout */
728 rep
->header
.putStr(HDR_VARY
, vary
.termedBuf());
732 #if X_ACCELERATOR_VARY
733 vary
= mem_obj
->getReply()->header
.getList(HDR_X_ACCELERATOR_VARY
);
735 if (vary
.size() > 0) {
736 /* Again, we own this structure layout */
737 rep
->header
.putStr(HDR_X_ACCELERATOR_VARY
, vary
.termedBuf());
742 pe
->replaceHttpReply(rep
, false); // no write until key is public
748 pe
->startWriting(); // after makePublic()
752 pe
->unlock("StoreEntry::setPublicKey+Vary");
755 newkey
= storeKeyPublicByRequest(mem_obj
->request
);
757 newkey
= storeKeyPublic(mem_obj
->storeId(), mem_obj
->method
);
759 if (StoreEntry
*e2
= (StoreEntry
*)hash_lookup(store_table
, newkey
)) {
760 debugs(20, 3, "Making old " << *e2
<< " private.");
764 if (mem_obj
->request
)
765 newkey
= storeKeyPublicByRequest(mem_obj
->request
);
767 newkey
= storeKeyPublic(mem_obj
->storeId(), mem_obj
->method
);
773 EBIT_CLR(flags
, KEY_PRIVATE
);
778 storeDirSwapLog(this, SWAP_LOG_ADD
);
782 storeCreatePureEntry(const char *url
, const char *log_url
, const RequestFlags
&flags
, const HttpRequestMethod
& method
)
784 StoreEntry
*e
= NULL
;
785 debugs(20, 3, "storeCreateEntry: '" << url
<< "'");
787 e
= new StoreEntry();
789 e
->mem_obj
->setUris(url
, log_url
, method
);
791 if (flags
.cachable
) {
792 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
797 e
->store_status
= STORE_PENDING
;
799 e
->lastref
= squid_curtime
;
800 e
->timestamp
= -1; /* set in StoreEntry::timestampsSet() */
801 e
->ping_status
= PING_NONE
;
802 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
807 storeCreateEntry(const char *url
, const char *logUrl
, const RequestFlags
&flags
, const HttpRequestMethod
& method
)
809 StoreEntry
*e
= storeCreatePureEntry(url
, logUrl
, flags
, method
);
810 e
->lock("storeCreateEntry");
812 if (neighbors_do_private_keys
|| !flags
.hierarchical
)
820 /* Mark object as expired */
822 StoreEntry::expireNow()
824 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
825 expires
= squid_curtime
;
829 StoreEntry::write (StoreIOBuffer writeBuffer
)
831 assert(mem_obj
!= NULL
);
832 /* This assert will change when we teach the store to update */
833 PROF_start(StoreEntry_write
);
834 assert(store_status
== STORE_PENDING
);
836 // XXX: caller uses content offset, but we also store headers
837 if (const HttpReply
*reply
= mem_obj
->getReply())
838 writeBuffer
.offset
+= reply
->hdr_sz
;
840 debugs(20, 5, "storeWrite: writing " << writeBuffer
.length
<< " bytes for '" << getMD5Text() << "'");
841 PROF_stop(StoreEntry_write
);
842 storeGetMemSpace(writeBuffer
.length
);
843 mem_obj
->write(writeBuffer
);
845 if (!EBIT_TEST(flags
, DELAY_SENDING
))
849 /* Append incoming data from a primary server to an entry. */
851 StoreEntry::append(char const *buf
, int len
)
853 assert(mem_obj
!= NULL
);
855 assert(store_status
== STORE_PENDING
);
857 StoreIOBuffer tempBuffer
;
858 tempBuffer
.data
= (char *)buf
;
859 tempBuffer
.length
= len
;
861 * XXX sigh, offset might be < 0 here, but it gets "corrected"
862 * later. This offset crap is such a mess.
864 tempBuffer
.offset
= mem_obj
->endOffset() - (getReply() ? getReply()->hdr_sz
: 0);
869 storeAppendPrintf(StoreEntry
* e
, const char *fmt
,...)
874 storeAppendVPrintf(e
, fmt
, args
);
878 /* used be storeAppendPrintf and Packer */
880 storeAppendVPrintf(StoreEntry
* e
, const char *fmt
, va_list vargs
)
882 LOCAL_ARRAY(char, buf
, 4096);
884 vsnprintf(buf
, 4096, fmt
, vargs
);
885 e
->append(buf
, strlen(buf
));
888 struct _store_check_cachable_hist
{
892 int not_entry_cachable
;
893 int wrong_content_length
;
898 int too_many_open_files
;
899 int too_many_open_fds
;
906 } store_check_cachable_hist
;
909 storeTooManyDiskFilesOpen(void)
911 if (Config
.max_open_disk_fds
== 0)
914 if (store_open_disk_fd
> Config
.max_open_disk_fds
)
921 StoreEntry::checkTooSmall()
923 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
926 if (STORE_OK
== store_status
)
927 if (mem_obj
->object_sz
>= 0 &&
928 mem_obj
->object_sz
< Config
.Store
.minObjectSize
)
930 if (getReply()->content_length
> -1)
931 if (getReply()->content_length
< Config
.Store
.minObjectSize
)
937 StoreEntry::checkTooBig() const
939 if (mem_obj
->endOffset() > store_maxobjsize
)
942 if (getReply()->content_length
< 0)
945 return (getReply()->content_length
> store_maxobjsize
);
948 // TODO: move "too many open..." checks outside -- we are called too early/late
950 StoreEntry::checkCachable()
952 // XXX: This method is used for both memory and disk caches, but some
953 // checks are specific to disk caches. Move them to mayStartSwapOut().
955 // XXX: This method may be called several times, sometimes with different
956 // outcomes, making store_check_cachable_hist counters misleading.
958 // check this first to optimize handling of repeated calls for uncachables
959 if (EBIT_TEST(flags
, RELEASE_REQUEST
)) {
960 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
961 ++store_check_cachable_hist
.no
.not_entry_cachable
; // TODO: rename?
962 return 0; // avoid rerequesting release below
965 #if CACHE_ALL_METHODS
967 if (mem_obj
->method
!= Http::METHOD_GET
) {
968 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
969 ++store_check_cachable_hist
.no
.non_get
;
972 if (store_status
== STORE_OK
&& EBIT_TEST(flags
, ENTRY_BAD_LENGTH
)) {
973 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
974 ++store_check_cachable_hist
.no
.wrong_content_length
;
975 } else if (EBIT_TEST(flags
, ENTRY_NEGCACHED
)) {
976 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
977 ++store_check_cachable_hist
.no
.negative_cached
;
978 return 0; /* avoid release call below */
979 } else if (!mem_obj
|| !getReply()) {
980 // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
981 // this segfault protection, but how can we get such a HIT?
982 debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
983 ++store_check_cachable_hist
.no
.missing_parts
;
984 } else if (checkTooBig()) {
985 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
986 ++store_check_cachable_hist
.no
.too_big
;
987 } else if (checkTooSmall()) {
988 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
989 ++store_check_cachable_hist
.no
.too_small
;
990 } else if (EBIT_TEST(flags
, KEY_PRIVATE
)) {
991 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
992 ++store_check_cachable_hist
.no
.private_key
;
993 } else if (swap_status
!= SWAPOUT_NONE
) {
995 * here we checked the swap_status because the remaining
996 * cases are only relevant only if we haven't started swapping
997 * out the object yet.
1000 } else if (storeTooManyDiskFilesOpen()) {
1001 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1002 ++store_check_cachable_hist
.no
.too_many_open_files
;
1003 } else if (fdNFree() < RESERVED_FD
) {
1004 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1005 ++store_check_cachable_hist
.no
.too_many_open_fds
;
1007 ++store_check_cachable_hist
.yes
.Default
;
1016 storeCheckCachableStats(StoreEntry
*sentry
)
1018 storeAppendPrintf(sentry
, "Category\t Count\n");
1020 #if CACHE_ALL_METHODS
1022 storeAppendPrintf(sentry
, "no.non_get\t%d\n",
1023 store_check_cachable_hist
.no
.non_get
);
1026 storeAppendPrintf(sentry
, "no.not_entry_cachable\t%d\n",
1027 store_check_cachable_hist
.no
.not_entry_cachable
);
1028 storeAppendPrintf(sentry
, "no.wrong_content_length\t%d\n",
1029 store_check_cachable_hist
.no
.wrong_content_length
);
1030 storeAppendPrintf(sentry
, "no.negative_cached\t%d\n",
1031 store_check_cachable_hist
.no
.negative_cached
);
1032 storeAppendPrintf(sentry
, "no.missing_parts\t%d\n",
1033 store_check_cachable_hist
.no
.missing_parts
);
1034 storeAppendPrintf(sentry
, "no.too_big\t%d\n",
1035 store_check_cachable_hist
.no
.too_big
);
1036 storeAppendPrintf(sentry
, "no.too_small\t%d\n",
1037 store_check_cachable_hist
.no
.too_small
);
1038 storeAppendPrintf(sentry
, "no.private_key\t%d\n",
1039 store_check_cachable_hist
.no
.private_key
);
1040 storeAppendPrintf(sentry
, "no.too_many_open_files\t%d\n",
1041 store_check_cachable_hist
.no
.too_many_open_files
);
1042 storeAppendPrintf(sentry
, "no.too_many_open_fds\t%d\n",
1043 store_check_cachable_hist
.no
.too_many_open_fds
);
1044 storeAppendPrintf(sentry
, "yes.default\t%d\n",
1045 store_check_cachable_hist
.yes
.Default
);
1049 StoreEntry::complete()
1051 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1053 if (store_status
!= STORE_PENDING
) {
1055 * if we're not STORE_PENDING, then probably we got aborted
1056 * and there should be NO clients on this entry
1058 assert(EBIT_TEST(flags
, ENTRY_ABORTED
));
1059 assert(mem_obj
->nclients
== 0);
1063 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1064 * in use of object_sz?
1066 mem_obj
->object_sz
= mem_obj
->endOffset();
1068 store_status
= STORE_OK
;
1070 assert(mem_status
== NOT_IN_MEMORY
);
1072 if (!validLength()) {
1073 EBIT_SET(flags
, ENTRY_BAD_LENGTH
);
1077 #if USE_CACHE_DIGESTS
1078 if (mem_obj
->request
)
1079 mem_obj
->request
->hier
.store_complete_stop
= current_time
;
1083 * We used to call invokeHandlers, then storeSwapOut. However,
1084 * Madhukar Reddy <myreddy@persistence.com> reported that
1085 * responses without content length would sometimes get released
1086 * in client_side, thinking that the response is incomplete.
1092 * Someone wants to abort this transfer. Set the reason in the
1093 * request structure, call the callback and mark the
1094 * entry for releasing
1099 ++statCounter
.aborted_requests
;
1100 assert(store_status
== STORE_PENDING
);
1101 assert(mem_obj
!= NULL
);
1102 debugs(20, 6, "storeAbort: " << getMD5Text());
1104 lock("StoreEntry::abort"); /* lock while aborting */
1109 EBIT_SET(flags
, ENTRY_ABORTED
);
1111 setMemStatus(NOT_IN_MEMORY
);
1113 store_status
= STORE_OK
;
1115 /* Notify the server side */
1119 * Should we check abort.data for validity?
1121 if (mem_obj
->abort
.callback
) {
1122 if (!cbdataReferenceValid(mem_obj
->abort
.data
))
1123 debugs(20, DBG_IMPORTANT
,HERE
<< "queueing event when abort.data is not valid");
1124 eventAdd("mem_obj->abort.callback",
1125 mem_obj
->abort
.callback
,
1126 mem_obj
->abort
.data
,
1132 /* XXX Should we reverse these two, so that there is no
1133 * unneeded disk swapping triggered?
1135 /* Notify the client side */
1138 // abort swap out, invalidating what was created so far (release follows)
1139 swapOutFileClose(StoreIOState::writerGone
);
1141 unlock("StoreEntry::abort"); /* unlock */
1145 * Clear Memory storage to accommodate the given object len
1148 storeGetMemSpace(int size
)
1150 PROF_start(storeGetMemSpace
);
1151 StoreEntry
*e
= NULL
;
1153 static time_t last_check
= 0;
1154 size_t pages_needed
;
1155 RemovalPurgeWalker
*walker
;
1157 if (squid_curtime
== last_check
) {
1158 PROF_stop(storeGetMemSpace
);
1162 last_check
= squid_curtime
;
1164 pages_needed
= (size
+ SM_PAGE_SIZE
-1) / SM_PAGE_SIZE
;
1166 if (mem_node::InUseCount() + pages_needed
< store_pages_max
) {
1167 PROF_stop(storeGetMemSpace
);
1171 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed
<<
1174 /* XXX what to set as max_scan here? */
1175 walker
= mem_policy
->PurgeInit(mem_policy
, 100000);
1177 while ((e
= walker
->Next(walker
))) {
1181 if (mem_node::InUseCount() + pages_needed
< store_pages_max
)
1185 walker
->Done(walker
);
1186 debugs(20, 3, "storeGetMemSpace stats:");
1187 debugs(20, 3, " " << std::setw(6) << hot_obj_count
<< " HOT objects");
1188 debugs(20, 3, " " << std::setw(6) << released
<< " were released");
1189 PROF_stop(storeGetMemSpace
);
1192 /* thunk through to Store::Root().maintain(). Note that this would be better still
1193 * if registered against the root store itself, but that requires more complex
1194 * update logic - bigger fish to fry first. Long term each store when
1195 * it becomes active will self register
1198 Store::Maintain(void *notused
)
1200 Store::Root().maintain();
1202 /* Reregister a maintain event .. */
1203 eventAdd("MaintainSwapSpace", Maintain
, NULL
, 1.0, 1);
1207 /* The maximum objects to scan for maintain storage space */
1208 #define MAINTAIN_MAX_SCAN 1024
1209 #define MAINTAIN_MAX_REMOVE 64
1212 * This routine is to be called by main loop in main.c.
1213 * It removes expired objects on only one bucket for each time called.
1215 * This should get called 1/s from main().
1218 StoreController::maintain()
1220 static time_t last_warn_time
= 0;
1222 PROF_start(storeMaintainSwapSpace
);
1223 swapDir
->maintain();
1225 /* this should be emitted by the oversize dir, not globally */
1227 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1228 if (squid_curtime
- last_warn_time
> 10) {
1229 debugs(20, DBG_CRITICAL
, "WARNING: Disk space over limit: "
1230 << Store::Root().currentSize() / 1024.0 << " KB > "
1231 << (Store::Root().maxSize() >> 10) << " KB");
1232 last_warn_time
= squid_curtime
;
1236 PROF_stop(storeMaintainSwapSpace
);
1239 /* release an object from a cache */
1241 StoreEntry::release()
1243 PROF_start(storeRelease
);
1244 debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
1245 /* If, for any reason we can't discard this object because of an
1246 * outstanding request, mark it for pending release */
1250 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1252 PROF_stop(storeRelease
);
1256 Store::Root().memoryUnlink(*this);
1258 if (StoreController::store_dirs_rebuilding
&& swap_filen
> -1) {
1261 if (swap_filen
> -1) {
1262 // lock the entry until rebuilding is done
1263 lock("storeLateRelease");
1265 LateReleaseStack
.push(this);
1267 destroyStoreEntry(static_cast<hash_link
*>(this));
1268 // "this" is no longer valid
1271 PROF_stop(storeRelease
);
1275 storeLog(STORE_LOG_RELEASE
, this);
1277 if (swap_filen
> -1) {
1278 // log before unlink() below clears swap_filen
1279 if (!EBIT_TEST(flags
, KEY_PRIVATE
))
1280 storeDirSwapLog(this, SWAP_LOG_DEL
);
1285 destroyStoreEntry(static_cast<hash_link
*>(this));
1286 PROF_stop(storeRelease
);
1290 storeLateRelease(void *unused
)
1295 if (StoreController::store_dirs_rebuilding
) {
1296 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1300 // TODO: this works but looks unelegant.
1301 for (int i
= 0; i
< 10; ++i
) {
1302 if (LateReleaseStack
.empty()) {
1303 debugs(20, DBG_IMPORTANT
, "storeLateRelease: released " << n
<< " objects");
1306 e
= LateReleaseStack
.top();
1307 LateReleaseStack
.pop();
1310 e
->unlock("storeLateRelease");
1314 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 0.0, 1);
1317 /* return 1 if a store entry is locked */
1319 StoreEntry::locked() const
1325 * SPECIAL, PUBLIC entries should be "locked";
1326 * XXX: Their owner should lock them then instead of relying on this hack.
1328 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1329 if (!EBIT_TEST(flags
, KEY_PRIVATE
))
1336 StoreEntry::validLength() const
1339 const HttpReply
*reply
;
1340 assert(mem_obj
!= NULL
);
1342 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1343 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1345 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply
->hdr_sz
);
1346 debugs(20, 5, "storeEntryValidLength: content_length = " << reply
->content_length
);
1348 if (reply
->content_length
< 0) {
1349 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1353 if (reply
->hdr_sz
== 0) {
1354 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1358 if (mem_obj
->method
== Http::METHOD_HEAD
) {
1359 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1363 if (reply
->sline
.status() == Http::scNotModified
)
1366 if (reply
->sline
.status() == Http::scNoContent
)
1369 diff
= reply
->hdr_sz
+ reply
->content_length
- objectLen();
1374 debugs(20, 3, "storeEntryValidLength: " << (diff
< 0 ? -diff
: diff
) << " bytes too " << (diff
< 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1380 storeRegisterWithCacheManager(void)
1382 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats
, 0, 1);
1383 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create
, 0, 1);
1384 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1385 storeCheckCachableStats
, 0, 1);
1392 mem_policy
= createRemovalPolicy(Config
.memPolicy
);
1395 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1396 Store::Root().init();
1397 storeRebuildStart();
1399 storeRegisterWithCacheManager();
1402 /// computes maximum size of a cachable object
1403 /// larger objects are rejected by all (disk and memory) cache stores
1405 storeCalcMaxObjSize()
1407 int64_t ms
= 0; // nothing can be cached without at least one store consent
1409 // global maximum is at least the disk store maximum
1410 for (int i
= 0; i
< Config
.cacheSwap
.n_configured
; ++i
) {
1411 assert (Config
.cacheSwap
.swapDirs
[i
].getRaw());
1412 const int64_t storeMax
= dynamic_cast<SwapDir
*>(Config
.cacheSwap
.swapDirs
[i
].getRaw())->maxObjectSize();
1417 // global maximum is at least the memory store maximum
1418 // TODO: move this into a memory cache class when we have one
1419 const int64_t memMax
= static_cast<int64_t>(min(Config
.Store
.maxInMemObjSize
, Config
.memMaxSize
));
1427 storeConfigure(void)
1429 store_swap_high
= (long) (((float) Store::Root().maxSize() *
1430 (float) Config
.Swap
.highWaterMark
) / (float) 100);
1431 store_swap_low
= (long) (((float) Store::Root().maxSize() *
1432 (float) Config
.Swap
.lowWaterMark
) / (float) 100);
1433 store_pages_max
= Config
.memMaxSize
/ sizeof(mem_node
);
1435 store_maxobjsize
= storeCalcMaxObjSize();
1439 StoreEntry::memoryCachable()
1441 if (!checkCachable())
1444 if (mem_obj
== NULL
)
1447 if (mem_obj
->data_hdr
.size() == 0)
1450 if (mem_obj
->inmem_lo
!= 0)
1453 if (!Config
.onoff
.memory_cache_first
&& swap_status
== SWAPOUT_DONE
&& refcount
== 1)
1460 StoreEntry::checkNegativeHit() const
1462 if (!EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1465 if (expires
<= squid_curtime
)
1468 if (store_status
!= STORE_OK
)
1475 * Set object for negative caching.
1476 * Preserves any expiry information given by the server.
1477 * In absence of proper expiry info it will set to expire immediately,
1478 * or with HTTP-violations enabled the configured negative-TTL is observed
1481 StoreEntry::negativeCache()
1483 // XXX: should make the default for expires 0 instead of -1
1484 // so we can distinguish "Expires: -1" from nothing.
1486 #if USE_HTTP_VIOLATIONS
1487 expires
= squid_curtime
+ Config
.negativeTtl
;
1489 expires
= squid_curtime
;
1491 EBIT_SET(flags
, ENTRY_NEGCACHED
);
1495 storeFreeMemory(void)
1498 #if USE_CACHE_DIGESTS
1501 cacheDigestDestroy(store_digest
);
1505 store_digest
= NULL
;
1509 expiresMoreThan(time_t expires
, time_t when
)
1511 if (expires
< 0) /* No Expires given */
1514 return (expires
> (squid_curtime
+ when
));
1518 StoreEntry::validToSend() const
1520 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
1523 if (EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1524 if (expires
<= squid_curtime
)
1527 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
1530 // now check that the entry has a cache backing or is collapsed
1531 if (swap_filen
> -1) // backed by a disk cache
1534 if (swappingOut()) // will be backed by a disk cache
1537 if (!mem_obj
) // not backed by a memory cache and not collapsed
1540 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
1541 // disk cache backing that store_client constructor will assert. XXX: This
1542 // is wrong for range requests (that could feed off nibbled memory) and for
1543 // entries backed by the shared memory cache (that could, in theory, get
1544 // nibbled bytes from that cache, but there is no such "memoryIn" code).
1545 if (mem_obj
->inmem_lo
) // in memory cache, but got nibbled at
1548 // The following check is correct but useless at this position. TODO: Move
1549 // it up when the shared memory cache can either replenish locally nibbled
1550 // bytes or, better, does not use local RAM copy at all.
1551 // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
1558 StoreEntry::timestampsSet()
1560 const HttpReply
*reply
= getReply();
1561 time_t served_date
= reply
->date
;
1562 int age
= reply
->header
.getInt(HDR_AGE
);
1563 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1564 /* make sure that 0 <= served_date <= squid_curtime */
1566 if (served_date
< 0 || served_date
> squid_curtime
)
1567 served_date
= squid_curtime
;
1570 * If the returned Date: is more than 24 hours older than
1571 * the squid_curtime, then one of us needs to use NTP to set our
1572 * clock. We'll pretend that our clock is right.
1574 else if (served_date
< (squid_curtime
- 24 * 60 * 60) )
1575 served_date
= squid_curtime
;
1578 * Compensate with Age header if origin server clock is ahead
1579 * of us and there is a cache in between us and the origin
1580 * server. But DONT compensate if the age value is larger than
1581 * squid_curtime because it results in a negative served_date.
1583 if (age
> squid_curtime
- served_date
)
1584 if (squid_curtime
> age
)
1585 served_date
= squid_curtime
- age
;
1587 // compensate for Squid-to-server and server-to-Squid delays
1588 if (mem_obj
&& mem_obj
->request
) {
1589 const time_t request_sent
=
1590 mem_obj
->request
->hier
.peer_http_request_sent
.tv_sec
;
1591 if (0 < request_sent
&& request_sent
< squid_curtime
)
1592 served_date
-= (squid_curtime
- request_sent
);
1595 if (reply
->expires
> 0 && reply
->date
> -1)
1596 expires
= served_date
+ (reply
->expires
- reply
->date
);
1598 expires
= reply
->expires
;
1600 lastmod
= reply
->last_modified
;
1602 timestamp
= served_date
;
1606 StoreEntry::registerAbort(STABH
* cb
, void *data
)
1609 assert(mem_obj
->abort
.callback
== NULL
);
1610 mem_obj
->abort
.callback
= cb
;
1611 mem_obj
->abort
.data
= cbdataReference(data
);
1615 StoreEntry::unregisterAbort()
1618 if (mem_obj
->abort
.callback
) {
1619 mem_obj
->abort
.callback
= NULL
;
1620 cbdataReferenceDone(mem_obj
->abort
.data
);
1625 StoreEntry::dump(int l
) const
1627 debugs(20, l
, "StoreEntry->key: " << getMD5Text());
1628 debugs(20, l
, "StoreEntry->next: " << next
);
1629 debugs(20, l
, "StoreEntry->mem_obj: " << mem_obj
);
1630 debugs(20, l
, "StoreEntry->timestamp: " << timestamp
);
1631 debugs(20, l
, "StoreEntry->lastref: " << lastref
);
1632 debugs(20, l
, "StoreEntry->expires: " << expires
);
1633 debugs(20, l
, "StoreEntry->lastmod: " << lastmod
);
1634 debugs(20, l
, "StoreEntry->swap_file_sz: " << swap_file_sz
);
1635 debugs(20, l
, "StoreEntry->refcount: " << refcount
);
1636 debugs(20, l
, "StoreEntry->flags: " << storeEntryFlags(this));
1637 debugs(20, l
, "StoreEntry->swap_dirn: " << swap_dirn
);
1638 debugs(20, l
, "StoreEntry->swap_filen: " << swap_filen
);
1639 debugs(20, l
, "StoreEntry->lock_count: " << lock_count
);
1640 debugs(20, l
, "StoreEntry->mem_status: " << mem_status
);
1641 debugs(20, l
, "StoreEntry->ping_status: " << ping_status
);
1642 debugs(20, l
, "StoreEntry->store_status: " << store_status
);
1643 debugs(20, l
, "StoreEntry->swap_status: " << swap_status
);
1647 * NOTE, this function assumes only two mem states
1650 StoreEntry::setMemStatus(mem_status_t new_status
)
1652 if (new_status
== mem_status
)
1655 // are we using a shared memory cache?
1656 if (Config
.memShared
&& IamWorkerProcess()) {
1657 // This method was designed to update replacement policy, not to
1658 // actually purge something from the memory cache (TODO: rename?).
1659 // Shared memory cache does not have a policy that needs updates.
1660 mem_status
= new_status
;
1664 assert(mem_obj
!= NULL
);
1666 if (new_status
== IN_MEMORY
) {
1667 assert(mem_obj
->inmem_lo
== 0);
1669 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1670 debugs(20, 4, "not inserting special " << *this << " into policy");
1672 mem_policy
->Add(mem_policy
, this, &mem_obj
->repl
);
1673 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
1676 ++hot_obj_count
; // TODO: maintain for the shared hot cache as well
1678 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1679 debugs(20, 4, "not removing special " << *this << " from policy");
1681 mem_policy
->Remove(mem_policy
, this, &mem_obj
->repl
);
1682 debugs(20, 4, "removed " << *this);
1688 mem_status
= new_status
;
1692 StoreEntry::url() const
1694 if (mem_obj
== NULL
)
1695 return "[null_mem_obj]";
1697 return mem_obj
->storeId();
1701 StoreEntry::makeMemObject()
1704 mem_obj
= new MemObject();
1709 StoreEntry::createMemObject(const char *aUrl
, const char *aLogUrl
, const HttpRequestMethod
&aMethod
)
1712 mem_obj
->setUris(aUrl
, aLogUrl
, aMethod
);
1715 /* this just sets DELAY_SENDING */
1717 StoreEntry::buffer()
1719 EBIT_SET(flags
, DELAY_SENDING
);
1722 /* this just clears DELAY_SENDING and Invokes the handlers */
1726 if (EBIT_TEST(flags
, DELAY_SENDING
)) {
1727 EBIT_CLR(flags
, DELAY_SENDING
);
1733 StoreEntry::objectLen() const
1735 assert(mem_obj
!= NULL
);
1736 return mem_obj
->object_sz
;
1740 StoreEntry::contentLen() const
1742 assert(mem_obj
!= NULL
);
1743 assert(getReply() != NULL
);
1744 return objectLen() - getReply()->hdr_sz
;
1748 StoreEntry::getReply () const
1750 if (NULL
== mem_obj
)
1753 return mem_obj
->getReply();
1760 debugs(20, 3, "StoreEntry::reset: " << url());
1762 HttpReply
*rep
= (HttpReply
*) getReply(); // bypass const
1764 expires
= lastmod
= timestamp
= -1;
/*
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
1781 * called to add another store removal policy module
1784 storeReplAdd(const char *type
, REMOVALPOLICYCREATE
* create
)
1788 /* find the number of currently known repl types */
1789 for (i
= 0; storerepl_list
&& storerepl_list
[i
].typestr
; ++i
) {
1790 if (strcmp(storerepl_list
[i
].typestr
, type
) == 0) {
1791 debugs(20, DBG_IMPORTANT
, "WARNING: Trying to load store replacement policy " << type
<< " twice.");
1796 /* add the new type */
1797 storerepl_list
= static_cast<storerepl_entry_t
*>(xrealloc(storerepl_list
, (i
+ 2) * sizeof(storerepl_entry_t
)));
1799 memset(&storerepl_list
[i
+ 1], 0, sizeof(storerepl_entry_t
));
1801 storerepl_list
[i
].typestr
= type
;
1803 storerepl_list
[i
].create
= create
;
1807 * Create a removal policy instance
1810 createRemovalPolicy(RemovalPolicySettings
* settings
)
1812 storerepl_entry_t
*r
;
1814 for (r
= storerepl_list
; r
&& r
->typestr
; ++r
) {
1815 if (strcmp(r
->typestr
, settings
->type
) == 0)
1816 return r
->create(settings
->args
);
1819 debugs(20, DBG_IMPORTANT
, "ERROR: Unknown policy " << settings
->type
);
1820 debugs(20, DBG_IMPORTANT
, "ERROR: Be sure to have set cache_replacement_policy");
1821 debugs(20, DBG_IMPORTANT
, "ERROR: and memory_replacement_policy in squid.conf!");
1822 fatalf("ERROR: Unknown policy %s\n", settings
->type
);
1823 return NULL
; /* NOTREACHED */
#if 0
/// Assign (or clear, with a negative filn) an entry's swap file number,
/// updating the swap-dir bitmap and LRU accordingly. Disabled upstream:
/// the storeDirMapBit*/storeDirLRU* helpers no longer exist.
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1848 StoreEntry::storeErrorResponse(HttpReply
*reply
)
1850 lock("StoreEntry::storeErrorResponse");
1852 replaceHttpReply(reply
);
1857 unlock("StoreEntry::storeErrorResponse");
1861 * Replace a store entry with
1862 * a new reply. This eats the reply.
1865 StoreEntry::replaceHttpReply(HttpReply
*rep
, bool andStartWriting
)
1867 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1870 debugs(20, DBG_CRITICAL
, "Attempt to replace object with no in-memory representation");
1874 mem_obj
->replaceHttpReply(rep
);
1876 if (andStartWriting
)
1881 StoreEntry::startWriting()
1885 /* TODO: when we store headers serparately remove the header portion */
1886 /* TODO: mark the length of the headers ? */
1887 /* We ONLY want the headers */
1888 packerToStoreInit(&p
, this);
1893 const HttpReply
*rep
= getReply();
1896 rep
->packHeadersInto(&p
);
1897 mem_obj
->markEndOfReplyHeaders();
1898 EBIT_CLR(flags
, ENTRY_FWD_HDR_WAIT
);
1900 rep
->body
.packInto(&p
);
1906 StoreEntry::getSerialisedMetaData()
1908 StoreMeta
*tlv_list
= storeSwapMetaBuild(this);
1910 char *result
= storeSwapMetaPack(tlv_list
, &swap_hdr_sz
);
1911 storeSwapTLVFree(tlv_list
);
1912 assert (swap_hdr_sz
>= 0);
1913 mem_obj
->swap_hdr_sz
= (size_t) swap_hdr_sz
;
1918 * Abandon the transient entry our worker has created if neither the shared
1919 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1920 * any, should notice and use Plan B instead of getting stuck waiting for us
1921 * to start swapping the entry out.
1924 StoreEntry::transientsAbandonmentCheck()
1926 if (mem_obj
&& !mem_obj
->smpCollapsed
&& // this worker is responsible
1927 mem_obj
->xitTable
.index
>= 0 && // other workers may be interested
1928 mem_obj
->memCache
.index
< 0 && // rejected by the shared memory cache
1929 mem_obj
->swapout
.decision
== MemObject::SwapOut::swImpossible
) {
1930 debugs(20, 7, "cannot be shared: " << *this);
1931 if (!shutting_down
) // Store::Root() is FATALly missing during shutdown
1932 Store::Root().transientsAbandon(*this);
1937 StoreEntry::memOutDecision(const bool willCacheInRam
)
1939 transientsAbandonmentCheck();
1943 StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision
&decision
)
1945 // Abandon our transient entry if neither shared memory nor disk wants it.
1947 mem_obj
->swapout
.decision
= decision
;
1948 transientsAbandonmentCheck();
1952 StoreEntry::trimMemory(const bool preserveSwappable
)
1956 * Bug #1943. We must not let go any data for IN_MEMORY
1957 * objects. We have to wait until the mem_status changes.
1959 if (mem_status
== IN_MEMORY
)
1962 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1963 return; // cannot trim because we do not load them again
1965 if (preserveSwappable
)
1966 mem_obj
->trimSwappable();
1968 mem_obj
->trimUnSwappable();
1970 debugs(88, 7, *this << " inmem_lo=" << mem_obj
->inmem_lo
);
1974 StoreEntry::modifiedSince(HttpRequest
* request
) const
1977 time_t mod_time
= lastmod
;
1980 mod_time
= timestamp
;
1982 debugs(88, 3, "modifiedSince: '" << url() << "'");
1984 debugs(88, 3, "modifiedSince: mod_time = " << mod_time
);
1989 /* Find size of the object */
1990 object_length
= getReply()->content_length
;
1992 if (object_length
< 0)
1993 object_length
= contentLen();
1995 if (mod_time
> request
->ims
) {
1996 debugs(88, 3, "--> YES: entry newer than client");
1998 } else if (mod_time
< request
->ims
) {
1999 debugs(88, 3, "--> NO: entry older than client");
2001 } else if (request
->imslen
< 0) {
2002 debugs(88, 3, "--> NO: same LMT, no client length");
2004 } else if (request
->imslen
== object_length
) {
2005 debugs(88, 3, "--> NO: same LMT, same length");
2008 debugs(88, 3, "--> YES: same LMT, different length");
2014 StoreEntry::hasEtag(ETag
&etag
) const
2016 if (const HttpReply
*reply
= getReply()) {
2017 etag
= reply
->header
.getETag(HDR_ETAG
);
2025 StoreEntry::hasIfMatchEtag(const HttpRequest
&request
) const
2027 const String reqETags
= request
.header
.getList(HDR_IF_MATCH
);
2028 return hasOneOfEtags(reqETags
, false);
2032 StoreEntry::hasIfNoneMatchEtag(const HttpRequest
&request
) const
2034 const String reqETags
= request
.header
.getList(HDR_IF_NONE_MATCH
);
2035 // weak comparison is allowed only for HEAD or full-body GET requests
2036 const bool allowWeakMatch
= !request
.flags
.isRanged
&&
2037 (request
.method
== Http::METHOD_GET
|| request
.method
== Http::METHOD_HEAD
);
2038 return hasOneOfEtags(reqETags
, allowWeakMatch
);
2041 /// whether at least one of the request ETags matches entity ETag
2043 StoreEntry::hasOneOfEtags(const String
&reqETags
, const bool allowWeakMatch
) const
2045 const ETag repETag
= getReply()->header
.getETag(HDR_ETAG
);
2047 return strListIsMember(&reqETags
, "*", ',');
2049 bool matched
= false;
2050 const char *pos
= NULL
;
2053 while (!matched
&& strListGetItem(&reqETags
, ',', &item
, &ilen
, &pos
)) {
2054 if (!strncmp(item
, "*", ilen
))
2058 str
.append(item
, ilen
);
2060 if (etagParseInit(&reqETag
, str
.termedBuf())) {
2061 matched
= allowWeakMatch
? etagIsWeakEqual(repETag
, reqETag
) :
2062 etagIsStrongEqual(repETag
, reqETag
);
2070 StoreEntry::store() const
2072 assert(0 <= swap_dirn
&& swap_dirn
< Config
.cacheSwap
.n_configured
);
2073 return INDEXSD(swap_dirn
);
2077 StoreEntry::unlink()
2079 store()->unlink(*this); // implies disconnect()
2082 swap_status
= SWAPOUT_NONE
;
2086 * return true if the entry is in a state where
2087 * it can accept more data (ie with write() method)
2090 StoreEntry::isAccepting() const
2092 if (STORE_PENDING
!= store_status
)
2095 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
2101 std::ostream
&operator <<(std::ostream
&os
, const StoreEntry
&e
)
2106 if (e
.mem_obj
->xitTable
.index
> -1)
2107 os
<< 't' << e
.mem_obj
->xitTable
.index
;
2108 if (e
.mem_obj
->memCache
.index
> -1)
2109 os
<< 'm' << e
.mem_obj
->memCache
.index
;
2111 if (e
.swap_filen
> -1 || e
.swap_dirn
> -1)
2112 os
<< 'd' << e
.swap_filen
<< '@' << e
.swap_dirn
;
2116 // print only non-default status values, using unique letters
2117 if (e
.mem_status
!= NOT_IN_MEMORY
||
2118 e
.store_status
!= STORE_PENDING
||
2119 e
.swap_status
!= SWAPOUT_NONE
||
2120 e
.ping_status
!= PING_NONE
) {
2121 if (e
.mem_status
!= NOT_IN_MEMORY
) os
<< 'm';
2122 if (e
.store_status
!= STORE_PENDING
) os
<< 's';
2123 if (e
.swap_status
!= SWAPOUT_NONE
) os
<< 'w' << e
.swap_status
;
2124 if (e
.ping_status
!= PING_NONE
) os
<< 'p' << e
.ping_status
;
2127 // print only set flags, using unique letters
2129 if (EBIT_TEST(e
.flags
, ENTRY_SPECIAL
)) os
<< 'S';
2130 if (EBIT_TEST(e
.flags
, ENTRY_REVALIDATE
)) os
<< 'R';
2131 if (EBIT_TEST(e
.flags
, DELAY_SENDING
)) os
<< 'P';
2132 if (EBIT_TEST(e
.flags
, RELEASE_REQUEST
)) os
<< 'X';
2133 if (EBIT_TEST(e
.flags
, REFRESH_REQUEST
)) os
<< 'F';
2134 if (EBIT_TEST(e
.flags
, ENTRY_DISPATCHED
)) os
<< 'D';
2135 if (EBIT_TEST(e
.flags
, KEY_PRIVATE
)) os
<< 'I';
2136 if (EBIT_TEST(e
.flags
, ENTRY_FWD_HDR_WAIT
)) os
<< 'W';
2137 if (EBIT_TEST(e
.flags
, ENTRY_NEGCACHED
)) os
<< 'N';
2138 if (EBIT_TEST(e
.flags
, ENTRY_VALIDATED
)) os
<< 'V';
2139 if (EBIT_TEST(e
.flags
, ENTRY_BAD_LENGTH
)) os
<< 'L';
2140 if (EBIT_TEST(e
.flags
, ENTRY_ABORTED
)) os
<< 'A';
2143 if (e
.mem_obj
&& e
.mem_obj
->smpCollapsed
)
2146 return os
<< '/' << &e
<< '*' << e
.locks();
2149 /* NullStoreEntry */
2151 NullStoreEntry
NullStoreEntry::_instance
;
2154 NullStoreEntry::getInstance()
2160 NullStoreEntry::getMD5Text() const
2166 NullStoreEntry::operator delete(void*)
2168 fatal ("Attempt to delete NullStoreEntry\n");
2172 NullStoreEntry::getSerialisedMetaData()