5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
37 #include "CacheManager.h"
38 #include "comm/Connection.h"
43 #include "mgr/Registration.h"
44 #include "StoreClient.h"
46 #include "HttpReply.h"
47 #include "HttpRequest.h"
48 #include "MemObject.h"
50 #include "StoreMeta.h"
52 #include "StoreIOState.h"
54 #include "DelayPools.h"
57 #include "SquidTime.h"
58 #include "swap_log_op.h"
59 #include "mgr/StoreIoAction.h"
61 static STMCB storeWriteComplete
;
63 #define REBUILD_TIMESTAMP_DELTA_MAX 2
65 #define STORE_IN_MEM_BUCKETS (229)
68 /** \todo Convert these string constants to enum string-arrays generated */
70 const char *memStatusStr
[] = {
75 const char *pingStatusStr
[] = {
81 const char *storeStatusStr
[] = {
86 const char *swapStatusStr
[] = {
94 * This defines an repl type
97 typedef struct _storerepl_entry storerepl_entry_t
;
99 struct _storerepl_entry
{
101 REMOVALPOLICYCREATE
*create
;
104 static storerepl_entry_t
*storerepl_list
= NULL
;
108 * local function prototypes
110 static int getKeyCounter(void);
111 static OBJH storeCheckCachableStats
;
112 static EVH storeLateRelease
;
117 static Stack
<StoreEntry
*> LateReleaseStack
;
118 MemAllocator
*StoreEntry::pool
= NULL
;
120 StorePointer
Store::CurrentRoot
= NULL
;
123 Store::Root(Store
* aRoot
)
129 Store::Root(StorePointer aRoot
)
131 Root(aRoot
.getRaw());
135 Store::Stats(StoreEntry
* output
)
138 Root().stat(*output
);
154 Store::unlink (StoreEntry
&anEntry
)
156 fatal("Store::unlink on invalid Store\n");
160 StoreEntry::operator new (size_t bytecount
)
162 assert (bytecount
== sizeof (StoreEntry
));
165 pool
= memPoolCreate ("StoreEntry", bytecount
);
166 pool
->setChunkSize(2048 * 1024);
169 return pool
->alloc();
173 StoreEntry::operator delete (void *address
)
175 pool
->freeOne(address
);
179 StoreEntry::makePublic()
181 /* This object can be cached for a long time */
183 if (EBIT_TEST(flags
, ENTRY_CACHABLE
))
188 StoreEntry::makePrivate()
190 /* This object should never be cached at all */
192 releaseRequest(); /* delete object when not used */
193 /* releaseRequest clears ENTRY_CACHABLE flag */
197 StoreEntry::cacheNegatively()
199 /* This object may be negatively cached */
202 if (EBIT_TEST(flags
, ENTRY_CACHABLE
))
207 StoreEntry::inUseCount()
211 return pool
->getInUseCount();
215 StoreEntry::getMD5Text() const
217 return storeKeyText((const cache_key
*)key
);
223 StoreEntry::DeferReader(void *theContext
, CommRead
const &aRead
)
225 StoreEntry
*anEntry
= (StoreEntry
*)theContext
;
226 anEntry
->delayAwareRead(aRead
.conn
,
233 StoreEntry::delayAwareRead(const Comm::ConnectionPointer
&conn
, char *buf
, int len
, AsyncCall::Pointer callback
)
235 size_t amountToRead
= bytesWanted(Range
<size_t>(0, len
));
236 /* sketch: readdeferer* = getdeferer.
237 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
240 if (amountToRead
== 0) {
242 /* read ahead limit */
243 /* Perhaps these two calls should both live in MemObject */
245 if (!mem_obj
->readAheadPolicyCanRead()) {
247 mem_obj
->delayRead(DeferredRead(DeferReader
, this, CommRead(conn
, buf
, len
, callback
)));
253 mem_obj
->mostBytesAllowed().delayRead(DeferredRead(DeferReader
, this, CommRead(conn
, buf
, len
, callback
)));
260 if (fd_table
[conn
->fd
].closing()) {
261 // Readers must have closing callbacks if they want to be notified. No
262 // readers appeared to care around 2009/12/14 as they skipped reading
263 // for other reasons. Closing may already be true at the delyaAwareRead
264 // call time or may happen while we wait after delayRead() above.
265 debugs(20, 3, HERE
<< "wont read from closing " << conn
<< " for " <<
267 return; // the read callback will never be called
270 comm_read(conn
, buf
, amountToRead
, callback
);
274 StoreEntry::bytesWanted (Range
<size_t> const aRange
) const
276 assert (aRange
.size());
279 return aRange
.end
- 1;
281 #if URL_CHECKSUM_DEBUG
283 mem_obj
->checkUrlChecksum();
287 /* Always read *something* here - we haven't got the header yet */
288 if (EBIT_TEST(flags
, ENTRY_FWD_HDR_WAIT
))
289 return aRange
.end
- 1;
291 if (!mem_obj
->readAheadPolicyCanRead())
294 return mem_obj
->mostBytesWanted(aRange
.end
- 1);
298 StoreEntry::checkDeferRead(int fd
) const
300 return (bytesWanted(Range
<size_t>(0,INT_MAX
)) == 0);
304 StoreEntry::setNoDelay (bool const newValue
)
307 mem_obj
->setNoDelay(newValue
);
311 StoreEntry::storeClientType() const
313 /* The needed offset isn't in memory
314 * XXX TODO: this is wrong for range requests
315 * as the needed offset may *not* be 0, AND
316 * offset 0 in the memory object is the HTTP headers.
319 if (mem_status
== IN_MEMORY
&& Config
.memShared
&& IamWorkerProcess()) {
320 // clients of an object cached in shared memory are memory clients
321 return STORE_MEM_CLIENT
;
326 if (mem_obj
->inmem_lo
)
327 return STORE_DISK_CLIENT
;
329 if (EBIT_TEST(flags
, ENTRY_ABORTED
)) {
330 /* I don't think we should be adding clients to aborted entries */
331 debugs(20, 1, "storeClientType: adding to ENTRY_ABORTED entry");
332 return STORE_MEM_CLIENT
;
335 if (store_status
== STORE_OK
) {
336 /* the object has completed. */
338 if (mem_obj
->inmem_lo
== 0 && !isEmpty()) {
339 if (swap_status
== SWAPOUT_DONE
) {
340 debugs(20,7, HERE
<< mem_obj
<< " lo: " << mem_obj
->inmem_lo
<< " hi: " << mem_obj
->endOffset() << " size: " << mem_obj
->object_sz
);
341 if (mem_obj
->endOffset() == mem_obj
->object_sz
) {
342 /* hot object fully swapped in */
343 return STORE_MEM_CLIENT
;
346 /* Memory-only, or currently being swapped out */
347 return STORE_MEM_CLIENT
;
350 return STORE_DISK_CLIENT
;
353 /* here and past, entry is STORE_PENDING */
355 * If this is the first client, let it be the mem client
357 if (mem_obj
->nclients
== 1)
358 return STORE_MEM_CLIENT
;
361 * If there is no disk file to open yet, we must make this a
362 * mem client. If we can't open the swapin file before writing
363 * to the client, there is no guarantee that we will be able
364 * to open it later when we really need it.
366 if (swap_status
== SWAPOUT_NONE
)
367 return STORE_MEM_CLIENT
;
370 * otherwise, make subsequent clients read from disk so they
371 * can not delay the first, and vice-versa.
373 return STORE_DISK_CLIENT
;
376 StoreEntry::StoreEntry():
377 hidden_mem_obj(NULL
),
380 debugs(20, 3, HERE
<< "new StoreEntry " << this);
383 expires
= lastmod
= lastref
= timestamp
= -1;
385 swap_status
= SWAPOUT_NONE
;
390 StoreEntry::StoreEntry(const char *aUrl
, const char *aLogUrl
):
391 hidden_mem_obj(NULL
),
394 debugs(20, 3, HERE
<< "new StoreEntry " << this);
395 mem_obj
= new MemObject(aUrl
, aLogUrl
);
397 expires
= lastmod
= lastref
= timestamp
= -1;
399 swap_status
= SWAPOUT_NONE
;
404 StoreEntry::~StoreEntry()
406 if (swap_filen
>= 0) {
407 SwapDir
&sd
= dynamic_cast<SwapDir
&>(*store());
408 sd
.disconnect(*this);
410 delete hidden_mem_obj
;
415 StoreEntry::deferProducer(const AsyncCall::Pointer
&producer
)
417 if (!deferredProducer
)
418 deferredProducer
= producer
;
420 debugs(20, 5, HERE
<< "Deferred producer call is allready set to: " <<
421 *deferredProducer
<< ", requested call: " << *producer
);
425 StoreEntry::kickProducer()
427 if (deferredProducer
!= NULL
) {
428 ScheduleCallHere(deferredProducer
);
429 deferredProducer
= NULL
;
435 StoreEntry::destroyMemObject()
437 debugs(20, 3, HERE
<< "destroyMemObject " << mem_obj
);
438 setMemStatus(NOT_IN_MEMORY
);
439 MemObject
*mem
= mem_obj
;
442 delete hidden_mem_obj
;
443 hidden_mem_obj
= NULL
;
447 StoreEntry::hideMemObject()
449 debugs(20, 3, HERE
<< "hiding " << mem_obj
);
451 assert(!hidden_mem_obj
);
452 hidden_mem_obj
= mem_obj
;
457 destroyStoreEntry(void *data
)
459 debugs(20, 3, HERE
<< "destroyStoreEntry: destroying " << data
);
460 StoreEntry
*e
= static_cast<StoreEntry
*>(static_cast<hash_link
*>(data
));
463 if (e
== NullStoreEntry::getInstance())
466 e
->destroyMemObject();
470 assert(e
->key
== NULL
);
475 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
478 StoreEntry::hashInsert(const cache_key
* someKey
)
480 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey
) << "'");
481 key
= storeKeyDup(someKey
);
482 hash_join(store_table
, this);
486 StoreEntry::hashDelete()
488 hash_remove_link(store_table
, this);
489 storeKeyFree((const cache_key
*)key
);
493 /* -------------------------------------------------------------------------- */
496 /* get rid of memory copy of the object */
498 StoreEntry::purgeMem()
503 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
507 if (swap_status
!= SWAPOUT_DONE
)
511 /* RBC 20050104 this is wrong- memory ref counting
512 * is not at all equivalent to the store 'usage' concept
513 * which the replacement policies should be acting upon.
514 * specifically, object iteration within stores needs
515 * memory ref counting to prevent race conditions,
516 * but this should not influence store replacement.
523 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
525 lastref
= squid_curtime
;
526 Store::Root().reference(*this);
530 StoreEntry::setReleaseFlag()
532 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
535 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
537 EBIT_SET(flags
, RELEASE_REQUEST
);
541 StoreEntry::releaseRequest()
543 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
549 * Clear cachable flag here because we might get called before
550 * anyone else even looks at the cachability flag. Also, this
551 * prevents httpMakePublic from really setting a public key.
553 EBIT_CLR(flags
, ENTRY_CACHABLE
);
558 /* unlock object, return -1 if object get released after unlock
559 * otherwise lock_count */
564 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count
);
567 return (int) lock_count
;
569 if (store_status
== STORE_PENDING
)
572 assert(storePendingNClients(this) == 0);
574 if (EBIT_TEST(flags
, RELEASE_REQUEST
)) {
579 if (EBIT_TEST(flags
, KEY_PRIVATE
))
580 debugs(20, 1, "WARNING: " << __FILE__
<< ":" << __LINE__
<< ": found KEY_PRIVATE");
582 Store::Root().handleIdleEntry(*this); // may delete us
587 StoreEntry::getPublicByRequestMethod (StoreClient
*aClient
, HttpRequest
* request
, const HttpRequestMethod
& method
)
590 StoreEntry
*result
= storeGetPublicByRequestMethod( request
, method
);
593 aClient
->created (NullStoreEntry::getInstance());
595 aClient
->created (result
);
599 StoreEntry::getPublicByRequest (StoreClient
*aClient
, HttpRequest
* request
)
602 StoreEntry
*result
= storeGetPublicByRequest (request
);
605 result
= NullStoreEntry::getInstance();
607 aClient
->created (result
);
611 StoreEntry::getPublic (StoreClient
*aClient
, const char *uri
, const HttpRequestMethod
& method
)
614 StoreEntry
*result
= storeGetPublic (uri
, method
);
617 result
= NullStoreEntry::getInstance();
619 aClient
->created (result
);
623 storeGetPublic(const char *uri
, const HttpRequestMethod
& method
)
625 return Store::Root().get(storeKeyPublic(uri
, method
));
629 storeGetPublicByRequestMethod(HttpRequest
* req
, const HttpRequestMethod
& method
)
631 return Store::Root().get(storeKeyPublicByRequestMethod(req
, method
));
635 storeGetPublicByRequest(HttpRequest
* req
)
637 StoreEntry
*e
= storeGetPublicByRequestMethod(req
, req
->method
);
639 if (e
== NULL
&& req
->method
== METHOD_HEAD
)
640 /* We can generate a HEAD reply from a cached GET object */
641 e
= storeGetPublicByRequestMethod(req
, METHOD_GET
);
649 static int key_counter
= 0;
651 if (++key_counter
< 0)
657 /* RBC 20050104 AFAICT this should become simpler:
658 * rather than reinserting with a special key it should be marked
659 * as 'released' and then cleaned up when refcounting indicates.
660 * the StoreHashIndex could well implement its 'released' in the
662 * Also, clean log writing should skip over ia,t
663 * Otherwise, we need a 'remove from the index but not the store
667 StoreEntry::setPrivateKey()
669 const cache_key
*newkey
;
671 if (key
&& EBIT_TEST(flags
, KEY_PRIVATE
))
672 return; /* is already private */
676 storeDirSwapLog(this, SWAP_LOG_DEL
);
681 if (mem_obj
!= NULL
) {
682 mem_obj
->id
= getKeyCounter();
683 newkey
= storeKeyPrivate(mem_obj
->url
, mem_obj
->method
, mem_obj
->id
);
685 newkey
= storeKeyPrivate("JUNK", METHOD_NONE
, getKeyCounter());
688 assert(hash_lookup(store_table
, newkey
) == NULL
);
689 EBIT_SET(flags
, KEY_PRIVATE
);
694 StoreEntry::setPublicKey()
696 StoreEntry
*e2
= NULL
;
697 const cache_key
*newkey
;
699 if (key
&& !EBIT_TEST(flags
, KEY_PRIVATE
))
700 return; /* is already public */
705 * We can't make RELEASE_REQUEST objects public. Depending on
706 * when RELEASE_REQUEST gets set, we might not be swapping out
707 * the object. If we're not swapping out, then subsequent
708 * store clients won't be able to access object data which has
709 * been freed from memory.
711 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
712 * be set, and StoreEntry::setPublicKey() should not be called.
714 #if MORE_DEBUG_OUTPUT
716 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
717 debugs(20, 1, "assertion failed: RELEASE key " << key
<< ", url " << mem_obj
->url
);
721 assert(!EBIT_TEST(flags
, RELEASE_REQUEST
));
723 if (mem_obj
->request
) {
724 HttpRequest
*request
= mem_obj
->request
;
726 if (!mem_obj
->vary_headers
) {
727 /* First handle the case where the object no longer varies */
728 safe_free(request
->vary_headers
);
730 if (request
->vary_headers
&& strcmp(request
->vary_headers
, mem_obj
->vary_headers
) != 0) {
731 /* Oops.. the variance has changed. Kill the base object
732 * to record the new variance key
734 safe_free(request
->vary_headers
); /* free old "bad" variance key */
735 StoreEntry
*pe
= storeGetPublic(mem_obj
->url
, mem_obj
->method
);
741 /* Make sure the request knows the variance status */
742 if (!request
->vary_headers
) {
743 const char *vary
= httpMakeVaryMark(request
, mem_obj
->getReply());
746 request
->vary_headers
= xstrdup(vary
);
750 // TODO: storeGetPublic() calls below may create unlocked entries.
751 // We should add/use storeHas() API or lock/unlock those entries.
752 if (mem_obj
->vary_headers
&& !storeGetPublic(mem_obj
->url
, mem_obj
->method
)) {
753 /* Create "vary" base object */
755 StoreEntry
*pe
= storeCreateEntry(mem_obj
->url
, mem_obj
->log_url
, request
->flags
, request
->method
);
756 /* We are allowed to do this typecast */
757 HttpReply
*rep
= new HttpReply
;
758 rep
->setHeaders(HTTP_OK
, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime
+ 100000);
759 vary
= mem_obj
->getReply()->header
.getList(HDR_VARY
);
762 /* Again, we own this structure layout */
763 rep
->header
.putStr(HDR_VARY
, vary
.termedBuf());
767 #if X_ACCELERATOR_VARY
768 vary
= mem_obj
->getReply()->header
.getList(HDR_X_ACCELERATOR_VARY
);
770 if (vary
.defined()) {
771 /* Again, we own this structure layout */
772 rep
->header
.putStr(HDR_X_ACCELERATOR_VARY
, vary
.termedBuf());
777 pe
->replaceHttpReply(rep
);
788 newkey
= storeKeyPublicByRequest(mem_obj
->request
);
790 newkey
= storeKeyPublic(mem_obj
->url
, mem_obj
->method
);
792 if ((e2
= (StoreEntry
*) hash_lookup(store_table
, newkey
))) {
793 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj
->url
<< "' private.");
797 if (mem_obj
->request
)
798 newkey
= storeKeyPublicByRequest(mem_obj
->request
);
800 newkey
= storeKeyPublic(mem_obj
->url
, mem_obj
->method
);
806 EBIT_CLR(flags
, KEY_PRIVATE
);
811 storeDirSwapLog(this, SWAP_LOG_ADD
);
815 storeCreateEntry(const char *url
, const char *log_url
, request_flags flags
, const HttpRequestMethod
& method
)
817 StoreEntry
*e
= NULL
;
818 MemObject
*mem
= NULL
;
819 debugs(20, 3, "storeCreateEntry: '" << url
<< "'");
821 e
= new StoreEntry(url
, log_url
);
822 e
->lock_count
= 1; /* Note lock here w/o calling storeLock() */
824 mem
->method
= method
;
826 if (neighbors_do_private_keys
|| !flags
.hierarchical
)
831 if (flags
.cachable
) {
832 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
833 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
835 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
839 e
->store_status
= STORE_PENDING
;
840 e
->setMemStatus(NOT_IN_MEMORY
);
842 e
->lastref
= squid_curtime
;
843 e
->timestamp
= -1; /* set in StoreEntry::timestampsSet() */
844 e
->ping_status
= PING_NONE
;
845 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
849 /* Mark object as expired */
851 StoreEntry::expireNow()
853 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
854 expires
= squid_curtime
;
858 storeWriteComplete (void *data
, StoreIOBuffer wroteBuffer
)
860 PROF_start(storeWriteComplete
);
861 StoreEntry
*e
= (StoreEntry
*)data
;
863 if (EBIT_TEST(e
->flags
, DELAY_SENDING
)) {
864 PROF_stop(storeWriteComplete
);
869 PROF_stop(storeWriteComplete
);
873 StoreEntry::write (StoreIOBuffer writeBuffer
)
875 assert(mem_obj
!= NULL
);
876 /* This assert will change when we teach the store to update */
877 PROF_start(StoreEntry_write
);
878 assert(store_status
== STORE_PENDING
);
880 debugs(20, 5, "storeWrite: writing " << writeBuffer
.length
<< " bytes for '" << getMD5Text() << "'");
881 PROF_stop(StoreEntry_write
);
882 storeGetMemSpace(writeBuffer
.length
);
883 mem_obj
->write (writeBuffer
, storeWriteComplete
, this);
886 /* Append incoming data from a primary server to an entry. */
888 StoreEntry::append(char const *buf
, int len
)
890 assert(mem_obj
!= NULL
);
892 assert(store_status
== STORE_PENDING
);
894 StoreIOBuffer tempBuffer
;
895 tempBuffer
.data
= (char *)buf
;
896 tempBuffer
.length
= len
;
898 * XXX sigh, offset might be < 0 here, but it gets "corrected"
899 * later. This offset crap is such a mess.
901 tempBuffer
.offset
= mem_obj
->endOffset() - (getReply() ? getReply()->hdr_sz
: 0);
907 storeAppendPrintf(StoreEntry
* e
, const char *fmt
,...)
912 storeAppendVPrintf(e
, fmt
, args
);
916 /* used be storeAppendPrintf and Packer */
918 storeAppendVPrintf(StoreEntry
* e
, const char *fmt
, va_list vargs
)
920 LOCAL_ARRAY(char, buf
, 4096);
922 vsnprintf(buf
, 4096, fmt
, vargs
);
923 e
->append(buf
, strlen(buf
));
926 struct _store_check_cachable_hist
{
930 int not_entry_cachable
;
931 int wrong_content_length
;
936 int too_many_open_files
;
937 int too_many_open_fds
;
943 } store_check_cachable_hist
;
946 storeTooManyDiskFilesOpen(void)
948 if (Config
.max_open_disk_fds
== 0)
951 if (store_open_disk_fd
> Config
.max_open_disk_fds
)
958 StoreEntry::checkTooSmall()
960 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
963 if (STORE_OK
== store_status
)
964 if (mem_obj
->object_sz
< 0 ||
965 mem_obj
->object_sz
< Config
.Store
.minObjectSize
)
967 if (getReply()->content_length
> -1)
968 if (getReply()->content_length
< Config
.Store
.minObjectSize
)
973 // TODO: remove checks already performed by swapoutPossible()
974 // TODO: move "too many open..." checks outside -- we are called too early/late
976 StoreEntry::checkCachable()
978 #if CACHE_ALL_METHODS
980 if (mem_obj
->method
!= METHOD_GET
) {
981 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
982 store_check_cachable_hist
.no
.non_get
++;
985 if (store_status
== STORE_OK
&& EBIT_TEST(flags
, ENTRY_BAD_LENGTH
)) {
986 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
987 store_check_cachable_hist
.no
.wrong_content_length
++;
988 } else if (!EBIT_TEST(flags
, ENTRY_CACHABLE
)) {
989 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
990 store_check_cachable_hist
.no
.not_entry_cachable
++;
991 } else if (EBIT_TEST(flags
, ENTRY_NEGCACHED
)) {
992 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
993 store_check_cachable_hist
.no
.negative_cached
++;
994 return 0; /* avoid release call below */
995 } else if ((getReply()->content_length
> 0 &&
996 getReply()->content_length
997 > Config
.Store
.maxObjectSize
) ||
998 mem_obj
->endOffset() > Config
.Store
.maxObjectSize
) {
999 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1000 store_check_cachable_hist
.no
.too_big
++;
1001 } else if (getReply()->content_length
> Config
.Store
.maxObjectSize
) {
1002 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1003 store_check_cachable_hist
.no
.too_big
++;
1004 } else if (checkTooSmall()) {
1005 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1006 store_check_cachable_hist
.no
.too_small
++;
1007 } else if (EBIT_TEST(flags
, KEY_PRIVATE
)) {
1008 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1009 store_check_cachable_hist
.no
.private_key
++;
1010 } else if (swap_status
!= SWAPOUT_NONE
) {
1012 * here we checked the swap_status because the remaining
1013 * cases are only relevant only if we haven't started swapping
1014 * out the object yet.
1017 } else if (storeTooManyDiskFilesOpen()) {
1018 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1019 store_check_cachable_hist
.no
.too_many_open_files
++;
1020 } else if (fdNFree() < RESERVED_FD
) {
1021 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1022 store_check_cachable_hist
.no
.too_many_open_fds
++;
1024 store_check_cachable_hist
.yes
.Default
++;
1029 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1034 storeCheckCachableStats(StoreEntry
*sentry
)
1036 storeAppendPrintf(sentry
, "Category\t Count\n");
1038 #if CACHE_ALL_METHODS
1040 storeAppendPrintf(sentry
, "no.non_get\t%d\n",
1041 store_check_cachable_hist
.no
.non_get
);
1044 storeAppendPrintf(sentry
, "no.not_entry_cachable\t%d\n",
1045 store_check_cachable_hist
.no
.not_entry_cachable
);
1046 storeAppendPrintf(sentry
, "no.wrong_content_length\t%d\n",
1047 store_check_cachable_hist
.no
.wrong_content_length
);
1048 storeAppendPrintf(sentry
, "no.negative_cached\t%d\n",
1049 store_check_cachable_hist
.no
.negative_cached
);
1050 storeAppendPrintf(sentry
, "no.too_big\t%d\n",
1051 store_check_cachable_hist
.no
.too_big
);
1052 storeAppendPrintf(sentry
, "no.too_small\t%d\n",
1053 store_check_cachable_hist
.no
.too_small
);
1054 storeAppendPrintf(sentry
, "no.private_key\t%d\n",
1055 store_check_cachable_hist
.no
.private_key
);
1056 storeAppendPrintf(sentry
, "no.too_many_open_files\t%d\n",
1057 store_check_cachable_hist
.no
.too_many_open_files
);
1058 storeAppendPrintf(sentry
, "no.too_many_open_fds\t%d\n",
1059 store_check_cachable_hist
.no
.too_many_open_fds
);
1060 storeAppendPrintf(sentry
, "yes.default\t%d\n",
1061 store_check_cachable_hist
.yes
.Default
);
1065 StoreEntry::complete()
1067 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1069 if (store_status
!= STORE_PENDING
) {
1071 * if we're not STORE_PENDING, then probably we got aborted
1072 * and there should be NO clients on this entry
1074 assert(EBIT_TEST(flags
, ENTRY_ABORTED
));
1075 assert(mem_obj
->nclients
== 0);
1079 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1080 * in use of object_sz?
1082 mem_obj
->object_sz
= mem_obj
->endOffset();
1084 store_status
= STORE_OK
;
1086 assert(mem_status
== NOT_IN_MEMORY
);
1088 if (!validLength()) {
1089 EBIT_SET(flags
, ENTRY_BAD_LENGTH
);
1093 #if USE_CACHE_DIGESTS
1094 if (mem_obj
->request
)
1095 mem_obj
->request
->hier
.store_complete_stop
= current_time
;
1099 * We used to call invokeHandlers, then storeSwapOut. However,
1100 * Madhukar Reddy <myreddy@persistence.com> reported that
1101 * responses without content length would sometimes get released
1102 * in client_side, thinking that the response is incomplete.
1108 * Someone wants to abort this transfer. Set the reason in the
1109 * request structure, call the server-side callback and mark the
1110 * entry for releasing
1115 statCounter
.aborted_requests
++;
1116 assert(store_status
== STORE_PENDING
);
1117 assert(mem_obj
!= NULL
);
1118 debugs(20, 6, "storeAbort: " << getMD5Text());
1120 lock(); /* lock while aborting */
1125 EBIT_SET(flags
, ENTRY_ABORTED
);
1127 setMemStatus(NOT_IN_MEMORY
);
1129 store_status
= STORE_OK
;
1131 /* Notify the server side */
1135 * Should we check abort.data for validity?
1137 if (mem_obj
->abort
.callback
) {
1138 if (!cbdataReferenceValid(mem_obj
->abort
.data
))
1139 debugs(20,1,HERE
<< "queueing event when abort.data is not valid");
1140 eventAdd("mem_obj->abort.callback",
1141 mem_obj
->abort
.callback
,
1142 mem_obj
->abort
.data
,
1148 /* XXX Should we reverse these two, so that there is no
1149 * unneeded disk swapping triggered?
1151 /* Notify the client side */
1154 // abort swap out, invalidating what was created so far (release follows)
1155 swapOutFileClose(StoreIOState::writerGone
);
1157 unlock(); /* unlock */
1161 * Clear Memory storage to accommodate the given object len
1164 storeGetMemSpace(int size
)
1166 PROF_start(storeGetMemSpace
);
1167 StoreEntry
*e
= NULL
;
1169 static time_t last_check
= 0;
1170 size_t pages_needed
;
1171 RemovalPurgeWalker
*walker
;
1173 if (squid_curtime
== last_check
) {
1174 PROF_stop(storeGetMemSpace
);
1178 last_check
= squid_curtime
;
1180 pages_needed
= (size
+ SM_PAGE_SIZE
-1) / SM_PAGE_SIZE
;
1182 if (mem_node::InUseCount() + pages_needed
< store_pages_max
) {
1183 PROF_stop(storeGetMemSpace
);
1187 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed
<<
1190 /* XXX what to set as max_scan here? */
1191 walker
= mem_policy
->PurgeInit(mem_policy
, 100000);
1193 while ((e
= walker
->Next(walker
))) {
1197 if (mem_node::InUseCount() + pages_needed
< store_pages_max
)
1201 walker
->Done(walker
);
1202 debugs(20, 3, "storeGetMemSpace stats:");
1203 debugs(20, 3, " " << std::setw(6) << hot_obj_count
<< " HOT objects");
1204 debugs(20, 3, " " << std::setw(6) << released
<< " were released");
1205 PROF_stop(storeGetMemSpace
);
1209 /* thunk through to Store::Root().maintain(). Note that this would be better still
1210 * if registered against the root store itself, but that requires more complex
1211 * update logic - bigger fish to fry first. Long term each store when
1212 * it becomes active will self register
1215 Store::Maintain(void *notused
)
1217 Store::Root().maintain();
1219 /* Reregister a maintain event .. */
1220 eventAdd("MaintainSwapSpace", Maintain
, NULL
, 1.0, 1);
1224 /* The maximum objects to scan for maintain storage space */
1225 #define MAINTAIN_MAX_SCAN 1024
1226 #define MAINTAIN_MAX_REMOVE 64
1229 * This routine is to be called by main loop in main.c.
1230 * It removes expired objects on only one bucket for each time called.
1232 * This should get called 1/s from main().
1235 StoreController::maintain()
1237 static time_t last_warn_time
= 0;
1239 PROF_start(storeMaintainSwapSpace
);
1240 swapDir
->maintain();
1242 /* this should be emitted by the oversize dir, not globally */
1244 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1245 if (squid_curtime
- last_warn_time
> 10) {
1246 debugs(20, DBG_CRITICAL
, "WARNING: Disk space over limit: "
1247 << Store::Root().currentSize() / 1024.0 << " KB > "
1248 << (Store::Root().maxSize() >> 10) << " KB");
1249 last_warn_time
= squid_curtime
;
1253 PROF_stop(storeMaintainSwapSpace
);
1256 /* release an object from a cache */
1258 StoreEntry::release()
1260 PROF_start(storeRelease
);
1261 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1262 /* If, for any reason we can't discard this object because of an
1263 * outstanding request, mark it for pending release */
1267 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1269 PROF_stop(storeRelease
);
1273 if (StoreController::store_dirs_rebuilding
&& swap_filen
> -1) {
1279 if (swap_filen
> -1) {
1281 * Fake a call to StoreEntry->lock() When rebuilding is done,
1282 * we'll just call StoreEntry->unlock() on these.
1286 LateReleaseStack
.push_back(this);
1288 destroyStoreEntry(static_cast<hash_link
*>(this));
1289 // "this" is no longer valid
1292 PROF_stop(storeRelease
);
1296 storeLog(STORE_LOG_RELEASE
, this);
1298 if (swap_filen
> -1) {
1299 // log before unlink() below clears swap_filen
1300 if (!EBIT_TEST(flags
, KEY_PRIVATE
))
1301 storeDirSwapLog(this, SWAP_LOG_DEL
);
1306 setMemStatus(NOT_IN_MEMORY
);
1307 destroyStoreEntry(static_cast<hash_link
*>(this));
1308 PROF_stop(storeRelease
);
1312 storeLateRelease(void *unused
)
1318 if (StoreController::store_dirs_rebuilding
) {
1319 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1323 for (i
= 0; i
< 10; i
++) {
1324 e
= LateReleaseStack
.count
? LateReleaseStack
.pop() : NULL
;
1328 debugs(20, 1, "storeLateRelease: released " << n
<< " objects");
1336 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 0.0, 1);
1339 /* return 1 if a store entry is locked */
1341 StoreEntry::locked() const
1346 if (swap_status
== SWAPOUT_WRITING
)
1349 if (store_status
== STORE_PENDING
)
1353 * SPECIAL, PUBLIC entries should be "locked"
1355 if (EBIT_TEST(flags
, ENTRY_SPECIAL
))
1356 if (!EBIT_TEST(flags
, KEY_PRIVATE
))
1363 StoreEntry::validLength() const
1366 const HttpReply
*reply
;
1367 assert(mem_obj
!= NULL
);
1369 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1370 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1372 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply
->hdr_sz
);
1373 debugs(20, 5, "storeEntryValidLength: content_length = " << reply
->content_length
);
1375 if (reply
->content_length
< 0) {
1376 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1380 if (reply
->hdr_sz
== 0) {
1381 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1385 if (mem_obj
->method
== METHOD_HEAD
) {
1386 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1390 if (reply
->sline
.status
== HTTP_NOT_MODIFIED
)
1393 if (reply
->sline
.status
== HTTP_NO_CONTENT
)
1396 diff
= reply
->hdr_sz
+ reply
->content_length
- objectLen();
1401 debugs(20, 3, "storeEntryValidLength: " << (diff
< 0 ? -diff
: diff
) << " bytes too " << (diff
< 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1407 storeRegisterWithCacheManager(void)
1409 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats
, 0, 1);
1410 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create
, 0, 1);
1411 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1412 storeCheckCachableStats
, 0, 1);
1419 mem_policy
= createRemovalPolicy(Config
.memPolicy
);
1422 eventAdd("storeLateRelease", storeLateRelease
, NULL
, 1.0, 1);
1423 Store::Root().init();
1424 storeRebuildStart();
1426 storeRegisterWithCacheManager();
/*
 * storeConfigure(): recompute derived store limits from the current
 * configuration: the swap high/low water marks as percentages of the
 * maximum store size, and the maximum number of in-memory pages.
 */
1430 storeConfigure(void)
/* high water mark: configured percentage of the total cache size */
1432 store_swap_high
= (long) (((float) Store::Root().maxSize() *
1433 (float) Config
.Swap
.highWaterMark
) / (float) 100);
/* low water mark: configured percentage of the total cache size */
1434 store_swap_low
= (long) (((float) Store::Root().maxSize() *
1435 (float) Config
.Swap
.lowWaterMark
) / (float) 100);
/* memory cache budget expressed in mem_node-sized pages */
1436 store_pages_max
= Config
.memMaxSize
/ sizeof(mem_node
);
/*
 * StoreEntry::memoryCachable(): decide whether this entry may be kept in
 * the memory cache.  Rejects entries without a mem_obj, with no buffered
 * data, with a trimmed in-memory prefix (inmem_lo != 0), entries already
 * fully swapped out when memory_cache_first is off, and (for shared
 * memory caches) entries of unknown or over-limit expected size.
 * NOTE(review): return statements are elided in this extraction.
 */
1440 StoreEntry::memoryCachable() const
1442 if (mem_obj
== NULL
)
1445 if (mem_obj
->data_hdr
.size() == 0)
/* inmem_lo != 0 means the start of the object has been trimmed away */
1448 if (mem_obj
->inmem_lo
!= 0)
1451 if (!Config
.onoff
.memory_cache_first
&& swap_status
== SWAPOUT_DONE
&& refcount
== 1)
/* extra restrictions apply when SMP workers share a memory cache */
1454 if (Config
.memShared
&& IamWorkerProcess()) {
1455 const int64_t expectedSize
= mem_obj
->expectedReplySize();
1456 // objects of unknown size are not allowed into memory cache, for now
1457 if (expectedSize
< 0 ||
1458 expectedSize
> static_cast<int64_t>(Config
.Store
.maxInMemObjSize
))
/*
 * StoreEntry::checkNegativeHit(): whether this entry is a still-fresh
 * negatively-cached (error) response: it must carry ENTRY_NEGCACHED,
 * not yet be expired, and be in STORE_OK state.
 */
1466 StoreEntry::checkNegativeHit() const
1468 if (!EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1471 if (expires
<= squid_curtime
)
1474 if (store_status
!= STORE_OK
)
1481 * Set object for negative caching.
1482 * Preserves any expiry information given by the server.
1483 * In absence of proper expiry info it will set to expire immediately,
1484 * or with HTTP-violations enabled the configured negative-TTL is observed
1487 StoreEntry::negativeCache()
1489 // XXX: should make the default for expires 0 instead of -1
1490 // so we can distinguish "Expires: -1" from nothing.
/* with HTTP violations enabled, hold the error for Config.negativeTtl */
1492 #if USE_HTTP_VIOLATIONS
1493 expires
= squid_curtime
+ Config
.negativeTtl
;
/* otherwise expire immediately (elided #else branch) */
1495 expires
= squid_curtime
;
/* mark the entry so checkNegativeHit() recognizes it */
1497 EBIT_SET(flags
, ENTRY_NEGCACHED
);
/*
 * storeFreeMemory(): tear down store memory at shutdown; when cache
 * digests are compiled in, destroy the store digest and clear its pointer.
 */
1501 storeFreeMemory(void)
1504 #if USE_CACHE_DIGESTS
1507 cacheDigestDestroy(store_digest
);
1511 store_digest
= NULL
;
/*
 * expiresMoreThan(): true if 'expires' lies more than 'when' seconds in
 * the future.  A negative 'expires' means no Expires value was given.
 */
1515 expiresMoreThan(time_t expires
, time_t when
)
1517 if (expires
< 0) /* No Expires given */
1520 return (expires
> (squid_curtime
+ when
));
/*
 * StoreEntry::validToSend(): whether this entry may be served to a
 * client.  Entries marked for release, expired negative-cache entries,
 * and aborted entries are not valid to send.
 * NOTE(review): return statements are elided in this extraction.
 */
1524 StoreEntry::validToSend() const
1526 if (EBIT_TEST(flags
, RELEASE_REQUEST
))
/* negatively cached entries are only valid while still fresh */
1529 if (EBIT_TEST(flags
, ENTRY_NEGCACHED
))
1530 if (expires
<= squid_curtime
)
1533 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
/*
 * StoreEntry::timestampsSet(): derive this entry's timestamp, expires and
 * lastmod fields from the stored reply's Date, Age, Expires and
 * Last-Modified headers, clamping implausible server dates and
 * compensating for intermediary age and network delays.
 */
1540 StoreEntry::timestampsSet()
1542 const HttpReply
*reply
= getReply();
1543 time_t served_date
= reply
->date
;
1544 int age
= reply
->header
.getInt(HDR_AGE
);
1545 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1546 /* make sure that 0 <= served_date <= squid_curtime */
1548 if (served_date
< 0 || served_date
> squid_curtime
)
1549 served_date
= squid_curtime
;
1552 * If the returned Date: is more than 24 hours older than
1553 * the squid_curtime, then one of us needs to use NTP to set our
1554 * clock. We'll pretend that our clock is right.
1556 else if (served_date
< (squid_curtime
- 24 * 60 * 60) )
1557 served_date
= squid_curtime
;
1560 * Compensate with Age header if origin server clock is ahead
1561 * of us and there is a cache in between us and the origin
1562 * server. But DONT compensate if the age value is larger than
1563 * squid_curtime because it results in a negative served_date.
1565 if (age
> squid_curtime
- served_date
)
1566 if (squid_curtime
> age
)
1567 served_date
= squid_curtime
- age
;
1569 // compensate for Squid-to-server and server-to-Squid delays
1570 if (mem_obj
&& mem_obj
->request
) {
1571 const time_t request_sent
=
1572 mem_obj
->request
->hier
.peer_http_request_sent
.tv_sec
;
1573 if (0 < request_sent
&& request_sent
< squid_curtime
)
1574 served_date
-= (squid_curtime
- request_sent
);
/* prefer a relative expiry (Expires - Date) anchored at served_date */
1577 if (reply
->expires
> 0 && reply
->date
> -1)
1578 expires
= served_date
+ (reply
->expires
- reply
->date
);
/* otherwise use the reply's Expires value as-is (elided else branch) */
1580 expires
= reply
->expires
;
1582 lastmod
= reply
->last_modified
;
1584 timestamp
= served_date
;
/*
 * StoreEntry::registerAbort(): install an abort callback (cb, data) on
 * this entry's mem_obj.  Asserts that no abort callback is already
 * registered; the callback data is protected with a cbdata reference.
 */
1588 StoreEntry::registerAbort(STABH
* cb
, void *data
)
1591 assert(mem_obj
->abort
.callback
== NULL
);
1592 mem_obj
->abort
.callback
= cb
;
1593 mem_obj
->abort
.data
= cbdataReference(data
);
/*
 * StoreEntry::unregisterAbort(): remove a previously registered abort
 * callback, if any, and release the cbdata reference on its data.
 * Safe to call when no callback is registered.
 */
1597 StoreEntry::unregisterAbort()
1600 if (mem_obj
->abort
.callback
) {
1601 mem_obj
->abort
.callback
= NULL
;
1602 cbdataReferenceDone(mem_obj
->abort
.data
);
/*
 * StoreEntry::dump(): emit every StoreEntry field to the debug log in
 * section 20 at the caller-supplied level 'l'.  Purely diagnostic; does
 * not modify the entry.
 */
1607 StoreEntry::dump(int l
) const
1609 debugs(20, l
, "StoreEntry->key: " << getMD5Text());
1610 debugs(20, l
, "StoreEntry->next: " << next
);
1611 debugs(20, l
, "StoreEntry->mem_obj: " << mem_obj
);
1612 debugs(20, l
, "StoreEntry->timestamp: " << timestamp
);
1613 debugs(20, l
, "StoreEntry->lastref: " << lastref
);
1614 debugs(20, l
, "StoreEntry->expires: " << expires
);
1615 debugs(20, l
, "StoreEntry->lastmod: " << lastmod
);
1616 debugs(20, l
, "StoreEntry->swap_file_sz: " << swap_file_sz
);
1617 debugs(20, l
, "StoreEntry->refcount: " << refcount
);
1618 debugs(20, l
, "StoreEntry->flags: " << storeEntryFlags(this));
1619 debugs(20, l
, "StoreEntry->swap_dirn: " << swap_dirn
);
1620 debugs(20, l
, "StoreEntry->swap_filen: " << swap_filen
);
1621 debugs(20, l
, "StoreEntry->lock_count: " << lock_count
);
1622 debugs(20, l
, "StoreEntry->mem_status: " << mem_status
);
1623 debugs(20, l
, "StoreEntry->ping_status: " << ping_status
);
1624 debugs(20, l
, "StoreEntry->store_status: " << store_status
);
1625 debugs(20, l
, "StoreEntry->swap_status: " << swap_status
);
/*
 * StoreEntry::setMemStatus(): transition this entry's memory status and
 * keep the local memory replacement policy in sync -- add the entry when
 * it becomes IN_MEMORY, remove it otherwise.  ENTRY_SPECIAL objects are
 * kept out of the policy.  With a shared (SMP) memory cache, only the
 * status field is updated since the shared cache has no local policy.
 */
1629 * NOTE, this function assumes only two mem states
1632 StoreEntry::setMemStatus(mem_status_t new_status
)
/* no-op when the status is unchanged */
1634 if (new_status
== mem_status
)
1637 // are we using a shared memory cache?
1638 if (Config
.memShared
&& IamWorkerProcess()) {
1639 assert(new_status
!= IN_MEMORY
); // we do not call this otherwise
1640 // This method was designed to update replacement policy, not to
1641 // actually purge something from the memory cache (TODO: rename?).
1642 // Shared memory cache does not have a policy that needs updates.
1643 mem_status
= new_status
;
1647 assert(mem_obj
!= NULL
);
1649 if (new_status
== IN_MEMORY
) {
1650 assert(mem_obj
->inmem_lo
== 0);
1652 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1653 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj
->url
<< " into policy");
1655 mem_policy
->Add(mem_policy
, this, &mem_obj
->repl
);
1656 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj
->url
<< " key: " << getMD5Text());
1659 hot_obj_count
++; // TODO: maintain for the shared hot cache as well
/* leaving IN_MEMORY: drop the entry from the policy (unless special) */
1661 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1662 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj
->url
);
1664 mem_policy
->Remove(mem_policy
, this, &mem_obj
->repl
);
1665 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj
->url
);
1671 mem_status
= new_status
;
/*
 * StoreEntry::url(): return the entry's URL, or a diagnostic placeholder
 * string when the entry or its mem_obj is missing.  Never returns NULL.
 */
1675 StoreEntry::url() const
1678 return "[null_entry]";
1679 else if (mem_obj
== NULL
)
1680 return "[null_mem_obj]";
1682 return mem_obj
->url
;
/*
 * StoreEntry::createMemObject(): attach a MemObject to this entry for the
 * given request URL and log URL.  If a previously hidden mem_obj exists
 * it is restored and its URLs reset; otherwise a fresh MemObject is
 * allocated.
 */
1686 StoreEntry::createMemObject(const char *aUrl
, const char *aLogUrl
)
1691 if (hidden_mem_obj
) {
1692 debugs(20, 3, HERE
<< "restoring " << hidden_mem_obj
);
1693 mem_obj
= hidden_mem_obj
;
1694 hidden_mem_obj
= NULL
;
1695 mem_obj
->resetUrls(aUrl
, aLogUrl
);
/* no hidden object to restore: allocate a new one (elided branch) */
1699 mem_obj
= new MemObject(aUrl
, aLogUrl
);
1702 /* this just sets DELAY_SENDING */
/* StoreEntry::buffer(): suspend delivery of appended data to clients
 * until flush() clears the DELAY_SENDING flag again. */
1704 StoreEntry::buffer()
1706 EBIT_SET(flags
, DELAY_SENDING
);
1709 /* this just clears DELAY_SENDING and invokes the handlers */
/* Counterpart of buffer(): clear DELAY_SENDING so buffered data can flow
 * to clients again.  NOTE(review): the function-name line is elided from
 * this extraction -- presumably StoreEntry::flush(); confirm upstream. */
1713 if (EBIT_TEST(flags
, DELAY_SENDING
)) {
1714 EBIT_CLR(flags
, DELAY_SENDING
);
/*
 * StoreEntry::objectLen(): total stored object size (headers + body) as
 * recorded in mem_obj->object_sz.  Requires a mem_obj.
 */
1720 StoreEntry::objectLen() const
1722 assert(mem_obj
!= NULL
);
1723 return mem_obj
->object_sz
;
/*
 * StoreEntry::contentLen(): body size only -- total object length minus
 * the reply header size.  Requires both a mem_obj and a reply.
 */
1727 StoreEntry::contentLen() const
1729 assert(mem_obj
!= NULL
);
1730 assert(getReply() != NULL
);
1731 return objectLen() - getReply()->hdr_sz
;
/*
 * StoreEntry::getReply(): accessor for the stored HTTP reply held by
 * mem_obj; the mem_obj == NULL branch's return value is elided from this
 * extraction.
 */
1735 StoreEntry::getReply () const
1737 if (NULL
== mem_obj
)
1740 return mem_obj
->getReply();
/*
 * Body of StoreEntry::reset() (header line elided from this extraction):
 * obtains a writable pointer to the stored reply and clears the entry's
 * expiry/modification/creation timestamps back to the unset value (-1).
 */
1747 debugs(20, 3, "StoreEntry::reset: " << url());
1749 HttpReply
*rep
= (HttpReply
*) getReply(); // bypass const
1751 expires
= lastmod
= timestamp
= -1;
1757 * This routine calls the SETUP routine for each fs type.
1758 * I don't know where the best place for this is, and I'm not going to shuffle
1759 * around large chunks of code right now (that can be done once it's working.)
1768 * called to add another store removal policy module
/*
 * storeReplAdd(type, create): append a removal-policy factory to the
 * NULL-terminated storerepl_list array, warning (and presumably skipping;
 * the early-exit line is elided) when the type is already registered.
 * The array is grown by xrealloc and re-terminated with a zeroed slot.
 */
1771 storeReplAdd(const char *type
, REMOVALPOLICYCREATE
* create
)
1775 /* find the number of currently known repl types */
1776 for (i
= 0; storerepl_list
&& storerepl_list
[i
].typestr
; i
++) {
1777 if (strcmp(storerepl_list
[i
].typestr
, type
) == 0) {
1778 debugs(20, 1, "WARNING: Trying to load store replacement policy " << type
<< " twice.");
1783 /* add the new type */
/* grow to i+2 slots: i existing, one new entry, one zeroed terminator */
1784 storerepl_list
= static_cast<storerepl_entry_t
*>(xrealloc(storerepl_list
, (i
+ 2) * sizeof(storerepl_entry_t
)));
1786 memset(&storerepl_list
[i
+ 1], 0, sizeof(storerepl_entry_t
));
1788 storerepl_list
[i
].typestr
= type
;
1790 storerepl_list
[i
].create
= create
;
1794 * Create a removal policy instance
/*
 * createRemovalPolicy(settings): look up settings->type in the registered
 * storerepl_list and instantiate the matching policy with settings->args.
 * An unknown type is a fatal configuration error (fatalf never returns).
 */
1797 createRemovalPolicy(RemovalPolicySettings
* settings
)
1799 storerepl_entry_t
*r
;
1801 for (r
= storerepl_list
; r
&& r
->typestr
; r
++) {
1802 if (strcmp(r
->typestr
, settings
->type
) == 0)
1803 return r
->create(settings
->args
);
1806 debugs(20, 1, "ERROR: Unknown policy " << settings
->type
);
1807 debugs(20, 1, "ERROR: Be sure to have set cache_replacement_policy");
1808 debugs(20, 1, "ERROR: and memory_replacement_policy in squid.conf!");
1809 fatalf("ERROR: Unknown policy %s\n", settings
->type
);
1810 return NULL
; /* NOTREACHED */
/*
 * storeSwapFileNumberSet(e, filn): move entry 'e' to swap file number
 * 'filn': release the old file number's directory map bit and LRU slot,
 * then claim the new number in the directory map.  No-op if the entry
 * already has that file number.
 */
1815 storeSwapFileNumberSet(StoreEntry
* e
, sfileno filn
)
1817 if (e
->swap_file_number
== filn
)
/* release the old mapping before assigning the new one */
1822 storeDirMapBitReset(e
->swap_file_number
);
1823 storeDirLRUDelete(e
);
1824 e
->swap_file_number
= -1;
1826 assert(-1 == e
->swap_file_number
);
/* assign and claim the new file number in one expression */
1827 storeDirMapBitSet(e
->swap_file_number
= filn
);
1836 * Replace a store entry with
1837 * a new reply. This eats the reply.
/*
 * StoreEntry::replaceHttpReply(rep, andStartWriting): hand ownership of
 * 'rep' to this entry's mem_obj, replacing the current reply; optionally
 * begin writing the new headers via startWriting().  Logs an error when
 * there is no in-memory representation to attach the reply to.
 */
1840 StoreEntry::replaceHttpReply(HttpReply
*rep
, bool andStartWriting
)
1842 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1845 debugs(20, 0, "Attempt to replace object with no in-memory representation");
1849 mem_obj
->replaceHttpReply(rep
);
1851 if (andStartWriting
)
/*
 * StoreEntry::startWriting(): serialize the stored reply's headers (and
 * body, if present) into this entry through a store-backed Packer, and
 * record where the reply headers end in the mem_obj.
 */
1857 StoreEntry::startWriting()
1861 /* TODO: when we store headers separately remove the header portion */
1862 /* TODO: mark the length of the headers ? */
1863 /* We ONLY want the headers */
1864 packerToStoreInit(&p
, this);
1869 const HttpReply
*rep
= getReply();
1872 rep
->packHeadersInto(&p
);
/* remember the header/body boundary for later use */
1873 mem_obj
->markEndOfReplyHeaders();
1875 httpBodyPackInto(&rep
->body
, &p
);
/*
 * StoreEntry::getSerialisedMetaData(): build the swap-meta TLV list for
 * this entry, pack it into a malloc'd buffer (caller owns 'result'),
 * free the TLV list, and record the packed header size in the mem_obj.
 */
1882 StoreEntry::getSerialisedMetaData()
1884 StoreMeta
*tlv_list
= storeSwapMetaBuild(this);
1886 char *result
= storeSwapMetaPack(tlv_list
, &swap_hdr_sz
);
/* the TLV list is no longer needed once packed */
1887 storeSwapTLVFree(tlv_list
);
1888 assert (swap_hdr_sz
>= 0);
1889 mem_obj
->swap_hdr_sz
= (size_t) swap_hdr_sz
;
/*
 * StoreEntry::swapoutPossible(): decide whether this entry may be written
 * to disk, caching the verdict in mem_obj->swapout.decision so the checks
 * are not repeated.  Rejects aborted, non-cachable, SPECIAL, and
 * over-max-size entries; defers objects whose final size is still
 * unknown when a cache_dir max-size limit is in force.
 */
1894 StoreEntry::swapoutPossible()
/* no cache_dirs configured: nothing to swap out to */
1896 if (!Config
.cacheSwap
.n_configured
)
1899 /* should we swap something out to disk? */
1900 debugs(20, 7, "storeSwapOut: " << url());
1901 debugs(20, 7, "storeSwapOut: store_status = " << storeStatusStr
[store_status
]);
/* the cached decision lives on the mem_obj */
1904 MemObject::SwapOut::Decision
&decision
= mem_obj
->swapout
.decision
;
1906 // if we decided that swapout is not possible, do not repeat same checks
1907 if (decision
== MemObject::SwapOut::swImpossible
) {
1908 debugs(20, 3, "storeSwapOut: already rejected");
1912 // this flag may change so we must check it even if we already said "yes"
1913 if (EBIT_TEST(flags
, ENTRY_ABORTED
)) {
1914 assert(EBIT_TEST(flags
, RELEASE_REQUEST
));
1915 // StoreEntry::abort() already closed the swap out file, if any
1916 decision
= MemObject::SwapOut::swImpossible
;
1920 // if we decided that swapout is possible, do not repeat same checks
1921 if (decision
== MemObject::SwapOut::swPossible
) {
1922 debugs(20, 3, "storeSwapOut: already allowed");
1926 // if we are swapping out already, do not repeat same checks
1927 if (swap_status
!= SWAPOUT_NONE
) {
1928 debugs(20, 3, "storeSwapOut: already started");
1929 decision
= MemObject::SwapOut::swPossible
;
1933 if (!checkCachable()) {
1934 debugs(20, 3, "storeSwapOut: not cachable");
1935 decision
= MemObject::SwapOut::swImpossible
;
1939 if (EBIT_TEST(flags
, ENTRY_SPECIAL
)) {
1940 debugs(20, 3, "storeSwapOut: " << url() << " SPECIAL");
1941 decision
= MemObject::SwapOut::swImpossible
;
1945 // check cache_dir max-size limit if all cache_dirs have it
1946 if (store_maxobjsize
>= 0) {
1947 // TODO: add estimated store metadata size to be conservative
1949 // use guaranteed maximum if it is known
1950 const int64_t expectedEnd
= mem_obj
->expectedReplySize();
1951 debugs(20, 7, "storeSwapOut: expectedEnd = " << expectedEnd
);
1952 if (expectedEnd
> store_maxobjsize
) {
1953 debugs(20, 3, "storeSwapOut: will not fit: " << expectedEnd
<<
1954 " > " << store_maxobjsize
);
1955 decision
= MemObject::SwapOut::swImpossible
;
1956 return false; // known to outgrow the limit eventually
1959 // use current minimum (always known)
1960 const int64_t currentEnd
= mem_obj
->endOffset();
1961 if (currentEnd
> store_maxobjsize
) {
1962 debugs(20, 3, "storeSwapOut: does not fit: " << currentEnd
<<
1963 " > " << store_maxobjsize
);
1964 decision
= MemObject::SwapOut::swImpossible
;
1965 return false; // already does not fit and may only get bigger
1968 // prevent default swPossible answer for yet unknown length
1969 if (expectedEnd
< 0) {
1970 debugs(20, 3, "storeSwapOut: wait for more info: " <<
1972 return false; // may fit later, but will be rejected now
/* all checks passed: cache the positive verdict */
1976 decision
= MemObject::SwapOut::swPossible
;
/*
 * StoreEntry::trimMemory(): release in-memory data no longer needed.
 * IN_MEMORY objects are never trimmed (Bug #1943); non-swappable objects
 * use trimUnSwappable() (after being made private -- that call is elided
 * here), swappable ones use trimSwappable().
 */
1981 StoreEntry::trimMemory()
1985 * Bug #1943. We must not let go any data for IN_MEMORY
1986 * objects. We have to wait until the mem_status changes.
1988 if (mem_status
== IN_MEMORY
)
1991 if (!swapOutAble()) {
1992 if (mem_obj
->policyLowestOffsetToKeep(0) == 0) {
1997 * It's not swap-able, and we're about to delete a chunk,
1998 * so we must make it PRIVATE. This is tricky/ugly because
1999 * for the most part, we treat swapable == cachable here.
2002 mem_obj
->trimUnSwappable ();
2004 mem_obj
->trimSwappable ();
/*
 * StoreEntry::modifiedSince(): evaluate a client's If-Modified-Since
 * condition against this entry.  Uses lastmod when available (fallback
 * to timestamp is in an elided branch); on an exact time match the
 * request's imslen is compared against the object length as a
 * tie-breaker.
 */
2009 StoreEntry::modifiedSince(HttpRequest
* request
) const
2012 time_t mod_time
= lastmod
;
2015 mod_time
= timestamp
;
2017 debugs(88, 3, "modifiedSince: '" << url() << "'");
2019 debugs(88, 3, "modifiedSince: mod_time = " << mod_time
);
2024 /* Find size of the object */
2025 object_length
= getReply()->content_length
;
/* fall back to the measured body length when Content-Length is unknown */
2027 if (object_length
< 0)
2028 object_length
= contentLen();
2030 if (mod_time
> request
->ims
) {
2031 debugs(88, 3, "--> YES: entry newer than client");
2033 } else if (mod_time
< request
->ims
) {
2034 debugs(88, 3, "--> NO: entry older than client");
2036 } else if (request
->imslen
< 0) {
2037 debugs(88, 3, "--> NO: same LMT, no client length");
2039 } else if (request
->imslen
== object_length
) {
2040 debugs(88, 3, "--> NO: same LMT, same length");
2043 debugs(88, 3, "--> YES: same LMT, different length");
/*
 * StoreEntry::hasIfMatchEtag(): whether any ETag in the request's
 * If-Match header matches this entry's ETag.  If-Match always uses
 * strong comparison (allowWeakMatch == false).
 */
2049 StoreEntry::hasIfMatchEtag(const HttpRequest
&request
) const
2051 const String reqETags
= request
.header
.getList(HDR_IF_MATCH
);
2052 return hasOneOfEtags(reqETags
, false);
/*
 * StoreEntry::hasIfNoneMatchEtag(): whether any ETag in the request's
 * If-None-Match header matches this entry's ETag.  Weak comparison is
 * permitted only for HEAD requests or full-body (non-Range) GETs.
 */
2056 StoreEntry::hasIfNoneMatchEtag(const HttpRequest
&request
) const
2058 const String reqETags
= request
.header
.getList(HDR_IF_NONE_MATCH
);
2059 // weak comparison is allowed only for HEAD or full-body GET requests
2060 const bool allowWeakMatch
= !request
.flags
.range
&&
2061 (request
.method
== METHOD_GET
|| request
.method
== METHOD_HEAD
);
2062 return hasOneOfEtags(reqETags
, allowWeakMatch
);
2065 /// whether at least one of the request ETags matches entity ETag
/*
 * Walks the comma-separated request ETag list; "*" matches any entity
 * (handled both up front via strListIsMember and inside the loop).  Each
 * list item is copied into a terminated buffer, parsed with
 * etagParseInit(), and compared with weak or strong ETag equality
 * depending on allowWeakMatch.
 */
2067 StoreEntry::hasOneOfEtags(const String
&reqETags
, const bool allowWeakMatch
) const
2069 const ETag repETag
= getReply()->header
.getETag(HDR_ETAG
);
/* no entity ETag (elided check): "*" is the only thing that can match */
2071 return strListIsMember(&reqETags
, "*", ',');
2073 bool matched
= false;
2074 const char *pos
= NULL
;
2077 while (!matched
&& strListGetItem(&reqETags
, ',', &item
, &ilen
, &pos
)) {
2078 if (!strncmp(item
, "*", ilen
))
/* copy the item so etagParseInit gets a NUL-terminated string */
2082 str
.append(item
, ilen
);
2084 if (etagParseInit(&reqETag
, str
.termedBuf())) {
2085 matched
= allowWeakMatch
? etagIsWeakEqual(repETag
, reqETag
) :
2086 etagIsStrongEqual(repETag
, reqETag
);
/*
 * StoreEntry::store(): return the swap directory object this entry lives
 * in, asserting that swap_dirn indexes a configured cache_dir.
 */
2094 StoreEntry::store() const
2096 assert(0 <= swap_dirn
&& swap_dirn
< Config
.cacheSwap
.n_configured
);
2097 return INDEXSD(swap_dirn
);
/*
 * StoreEntry::unlink(): remove this entry's on-disk copy via its swap
 * directory (which also disconnects the entry) and mark the swap status
 * as SWAPOUT_NONE.
 */
2101 StoreEntry::unlink()
2103 store()->unlink(*this); // implies disconnect()
2106 swap_status
= SWAPOUT_NONE
;
2110 * return true if the entry is in a state where
2111 * it can accept more data (ie with write() method)
/* The entry must be STORE_PENDING and not aborted to accept writes. */
2114 StoreEntry::isAccepting() const
2116 if (STORE_PENDING
!= store_status
)
2119 if (EBIT_TEST(flags
, ENTRY_ABORTED
))
/*
 * Debug stream formatter for StoreEntry: prints the entry as
 * "filen@dirn=mem/ping/store/..." status summary (continuation of the
 * expression is elided from this extraction).
 */
2125 std::ostream
&operator <<(std::ostream
&os
, const StoreEntry
&e
)
2127 return os
<< e
.swap_filen
<< '@' << e
.swap_dirn
<< '=' <<
2128 e
.mem_status
<< '/' << e
.ping_status
<< '/' << e
.store_status
<< '/' <<
2132 /* NullStoreEntry */
/* Null-object singleton used in place of a real StoreEntry.  Method
 * bodies other than operator delete are elided from this extraction;
 * deleting the singleton is a fatal error. */
2134 NullStoreEntry
NullStoreEntry::_instance
;
2137 NullStoreEntry::getInstance()
2143 NullStoreEntry::getMD5Text() const
2149 NullStoreEntry::operator delete(void*)
2151 fatal ("Attempt to delete NullStoreEntry\n");
2155 NullStoreEntry::getSerialisedMetaData()
2161 #include "Store.cci"