]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store.cc
Boilerplate: update copyright blurbs on src/
[thirdparty/squid.git] / src / store.cc
CommitLineData
30a4f2a8 1/*
bbc27441 2 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
e25c139f 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
c943f331 7 */
090089c4 8
bbc27441
AJ
9/* DEBUG: section 20 Storage Manager */
10
582c2af2 11#include "squid.h"
b814e8d4 12#include "CacheDigest.h"
ec20038e
AJ
13#include "CacheManager.h"
14#include "comm/Connection.h"
7e66d5e2 15#include "comm/Read.h"
81a94152 16#include "ETag.h"
a553a5a3 17#include "event.h"
04f55905 18#include "fde.h"
af69c635 19#include "globals.h"
582c2af2 20#include "http.h"
528b2c61 21#include "HttpReply.h"
22#include "HttpRequest.h"
528b2c61 23#include "mem_node.h"
582c2af2
FC
24#include "MemObject.h"
25#include "mgr/Registration.h"
26#include "mgr/StoreIoAction.h"
27#include "profiler/Profiler.h"
e452f48d 28#include "repl_modules.h"
f206b652 29#include "RequestFlags.h"
4d5904f7 30#include "SquidConfig.h"
582c2af2 31#include "SquidTime.h"
e4f1fdae 32#include "StatCounters.h"
582c2af2 33#include "stmem.h"
602d9612 34#include "Store.h"
35a28a37 35#include "store_digest.h"
fb548aaf 36#include "store_key_md5.h"
e87137f1 37#include "store_key_md5.h"
10818c0a 38#include "store_log.h"
687f5275 39#include "store_rebuild.h"
e87137f1
FC
40#include "StoreClient.h"
41#include "StoreIOState.h"
42#include "StoreMeta.h"
43#include "StrList.h"
582c2af2 44#include "swap_log_op.h"
e87137f1 45#include "SwapDir.h"
5bed43d6 46#include "tools.h"
9a0a18de 47#if USE_DELAY_POOLS
b67e2c8c 48#include "DelayPools.h"
49#endif
090089c4 50
074d6a40 51#include <climits>
cfb88efb 52#include <stack>
06e91875 53
090089c4 54#define REBUILD_TIMESTAMP_DELTA_MAX 2
227fbb74 55
c21ad0f5 56#define STORE_IN_MEM_BUCKETS (229)
090089c4 57
4b981814
AJ
58/** \todo Convert these string constants to enum string-arrays generated */
59
26ac0430
AJ
60const char *memStatusStr[] = {
61 "NOT_IN_MEMORY",
62 "IN_MEMORY"
63};
64
65const char *pingStatusStr[] = {
66 "PING_NONE",
67 "PING_WAITING",
68 "PING_DONE"
69};
70
71const char *storeStatusStr[] = {
72 "STORE_OK",
73 "STORE_PENDING"
74};
75
76const char *swapStatusStr[] = {
77 "SWAPOUT_NONE",
78 "SWAPOUT_WRITING",
79 "SWAPOUT_DONE"
80};
9dfb6c1c 81
25b6a907 82/*
83 * This defines an repl type
84 */
85
86typedef struct _storerepl_entry storerepl_entry_t;
87
26ac0430 88struct _storerepl_entry {
25b6a907 89 const char *typestr;
90 REMOVALPOLICYCREATE *create;
91};
92
93static storerepl_entry_t *storerepl_list = NULL;
94
e3ef2b09 95/*
96 * local function prototypes
97 */
007b8be4 98static int getKeyCounter(void);
8423ff74 99static OBJH storeCheckCachableStats;
e42d5181 100static EVH storeLateRelease;
a21fbb54 101
e3ef2b09 102/*
103 * local variables
104 */
cfb88efb 105static std::stack<StoreEntry*> LateReleaseStack;
04eb0689 106MemAllocator *StoreEntry::pool = NULL;
e6ccf245 107
c8f4eac4 108StorePointer Store::CurrentRoot = NULL;
109
110void
111Store::Root(Store * aRoot)
112{
113 CurrentRoot = aRoot;
114}
115
116void
117Store::Root(StorePointer aRoot)
118{
119 Root(aRoot.getRaw());
120}
121
122void
123Store::Stats(StoreEntry * output)
124{
125 assert (output);
126 Root().stat(*output);
127}
128
129void
130Store::create()
131{}
132
133void
134Store::diskFull()
135{}
136
137void
138Store::sync()
139{}
140
141void
142Store::unlink (StoreEntry &anEntry)
143{
144 fatal("Store::unlink on invalid Store\n");
145}
146
e6ccf245 147void *
3b13a8fd 148StoreEntry::operator new (size_t bytecount)
e6ccf245 149{
3b13a8fd 150 assert (bytecount == sizeof (StoreEntry));
62e76326 151
e6ccf245 152 if (!pool) {
04eb0689 153 pool = memPoolCreate ("StoreEntry", bytecount);
b001e822 154 pool->setChunkSize(2048 * 1024);
e6ccf245 155 }
62e76326 156
b001e822 157 return pool->alloc();
e6ccf245 158}
159
160void
3b13a8fd 161StoreEntry::operator delete (void *address)
e6ccf245 162{
dc47f531 163 pool->freeOne(address);
e6ccf245 164}
165
5ed72359 166void
167StoreEntry::makePublic()
168{
169 /* This object can be cached for a long time */
170
6919be24 171 if (!EBIT_TEST(flags, RELEASE_REQUEST))
d88e3c49 172 setPublicKey();
5ed72359 173}
174
175void
176StoreEntry::makePrivate()
177{
178 /* This object should never be cached at all */
d88e3c49 179 expireNow();
180 releaseRequest(); /* delete object when not used */
5ed72359 181}
182
183void
184StoreEntry::cacheNegatively()
185{
186 /* This object may be negatively cached */
d88e3c49 187 negativeCache();
6919be24 188 makePublic();
5ed72359 189}
190
e6ccf245 191size_t
3b13a8fd 192StoreEntry::inUseCount()
e6ccf245 193{
194 if (!pool)
62e76326 195 return 0;
9f9e06f3 196 return pool->getInUseCount();
e6ccf245 197}
198
332dafa2 199const char *
3b13a8fd 200StoreEntry::getMD5Text() const
332dafa2 201{
202 return storeKeyText((const cache_key *)key);
203}
204
a46d2c0e 205#include "comm.h"
206
207void
208StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
e6ccf245 209{
a46d2c0e 210 StoreEntry *anEntry = (StoreEntry *)theContext;
3e4bebf8 211 anEntry->delayAwareRead(aRead.conn,
a46d2c0e 212 aRead.buf,
213 aRead.len,
65517dc8 214 aRead.callback);
e6ccf245 215}
66cedb85 216
a46d2c0e 217void
3e4bebf8 218StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
a46d2c0e 219{
220 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
221 /* sketch: readdeferer* = getdeferer.
65517dc8 222 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
a46d2c0e 223 */
224
225 if (amountToRead == 0) {
226 assert (mem_obj);
227 /* read ahead limit */
228 /* Perhaps these two calls should both live in MemObject */
9a0a18de 229#if USE_DELAY_POOLS
a46d2c0e 230 if (!mem_obj->readAheadPolicyCanRead()) {
a5f42284 231#endif
3e4bebf8 232 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
a46d2c0e 233 return;
9a0a18de 234#if USE_DELAY_POOLS
a46d2c0e 235 }
236
237 /* delay id limit */
3e4bebf8 238 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
a46d2c0e 239 return;
a5f42284 240
241#endif
242
a46d2c0e 243 }
244
109cf61c 245 if (fd_table[conn->fd].closing()) {
f5f9e44c
AR
246 // Readers must have closing callbacks if they want to be notified. No
247 // readers appeared to care around 2009/12/14 as they skipped reading
248 // for other reasons. Closing may already be true at the delyaAwareRead
249 // call time or may happen while we wait after delayRead() above.
109cf61c 250 debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
f5f9e44c
AR
251 callback);
252 return; // the read callback will never be called
253 }
254
3e4bebf8 255 comm_read(conn, buf, amountToRead, callback);
a46d2c0e 256}
257
258size_t
384a7590 259StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
528b2c61 260{
528b2c61 261 if (mem_obj == NULL)
4dc2b072 262 return aRange.end;
62e76326 263
bc87dc25 264#if URL_CHECKSUM_DEBUG
62e76326 265
528b2c61 266 mem_obj->checkUrlChecksum();
62e76326 267
528b2c61 268#endif
62e76326 269
a46d2c0e 270 if (!mem_obj->readAheadPolicyCanRead())
271 return 0;
62e76326 272
384a7590 273 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
a46d2c0e 274}
62e76326 275
a46d2c0e 276bool
277StoreEntry::checkDeferRead(int fd) const
278{
279 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
280}
62e76326 281
a46d2c0e 282void
283StoreEntry::setNoDelay (bool const newValue)
284{
285 if (mem_obj)
286 mem_obj->setNoDelay(newValue);
528b2c61 287}
bc87dc25 288
f25d697f
AR
289// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
290// open swapin file, aggressively trim memory, and ignore read-ahead gap.
291// It does not mean we will read from disk exclusively (or at all!).
292// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
293// XXX: Collapsed clients cannot predict their type.
528b2c61 294store_client_t
295StoreEntry::storeClientType() const
227fbb74 296{
7d31d5fa 297 /* The needed offset isn't in memory
298 * XXX TODO: this is wrong for range requests
299 * as the needed offset may *not* be 0, AND
300 * offset 0 in the memory object is the HTTP headers.
301 */
302
9487bae9
AR
303 assert(mem_obj);
304
528b2c61 305 if (mem_obj->inmem_lo)
62e76326 306 return STORE_DISK_CLIENT;
307
528b2c61 308 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
62e76326 309 /* I don't think we should be adding clients to aborted entries */
e0236918 310 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
62e76326 311 return STORE_MEM_CLIENT;
528b2c61 312 }
62e76326 313
528b2c61 314 if (store_status == STORE_OK) {
7d31d5fa 315 /* the object has completed. */
316
344a9006 317 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
47ce0a58 318 if (swap_status == SWAPOUT_DONE) {
e2851fe7 319 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
e1381638 320 if (mem_obj->endOffset() == mem_obj->object_sz) {
f25d697f 321 /* hot object fully swapped in (XXX: or swapped out?) */
e1381638
AJ
322 return STORE_MEM_CLIENT;
323 }
47ce0a58 324 } else {
e1381638
AJ
325 /* Memory-only, or currently being swapped out */
326 return STORE_MEM_CLIENT;
47ce0a58
HN
327 }
328 }
329 return STORE_DISK_CLIENT;
528b2c61 330 }
62e76326 331
528b2c61 332 /* here and past, entry is STORE_PENDING */
333 /*
334 * If this is the first client, let it be the mem client
335 */
336 if (mem_obj->nclients == 1)
62e76326 337 return STORE_MEM_CLIENT;
338
528b2c61 339 /*
340 * If there is no disk file to open yet, we must make this a
341 * mem client. If we can't open the swapin file before writing
342 * to the client, there is no guarantee that we will be able
343 * to open it later when we really need it.
344 */
345 if (swap_status == SWAPOUT_NONE)
62e76326 346 return STORE_MEM_CLIENT;
347
528b2c61 348 /*
349 * otherwise, make subsequent clients read from disk so they
350 * can not delay the first, and vice-versa.
351 */
352 return STORE_DISK_CLIENT;
227fbb74 353}
354
8ebae981
AJ
355StoreEntry::StoreEntry() :
356 mem_obj(NULL),
ec4c70ce
AJ
357 timestamp(-1),
358 lastref(-1),
8ebae981 359 expires(-1),
c95983c8 360 lastmod(-1),
8ebae981
AJ
361 swap_file_sz(0),
362 refcount(0),
363 flags(0),
364 swap_filen(-1),
365 swap_dirn(-1),
8ebae981
AJ
366 mem_status(NOT_IN_MEMORY),
367 ping_status(PING_NONE),
368 store_status(STORE_PENDING),
1bfe9ade
AR
369 swap_status(SWAPOUT_NONE),
370 lock_count(0)
090089c4 371{
539283df 372 debugs(20, 5, "StoreEntry constructed, this=" << this);
c8f4eac4 373}
62e76326 374
6d8d05b5
DK
375StoreEntry::~StoreEntry()
376{
539283df 377 debugs(20, 5, "StoreEntry destructed, this=" << this);
6d8d05b5
DK
378}
379
0ad2b63b
CT
380#if USE_ADAPTATION
381void
382StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
383{
384 if (!deferredProducer)
385 deferredProducer = producer;
386 else
e83cdc25 387 debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
0ad2b63b
CT
388 *deferredProducer << ", requested call: " << *producer);
389}
390
391void
392StoreEntry::kickProducer()
393{
e83cdc25 394 if (deferredProducer != NULL) {
0ad2b63b
CT
395 ScheduleCallHere(deferredProducer);
396 deferredProducer = NULL;
397 }
398}
399#endif
400
3900307b 401void
402StoreEntry::destroyMemObject()
090089c4 403{
3900307b 404 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
29c56e41
AR
405
406 if (MemObject *mem = mem_obj) {
407 // Store::Root() is FATALly missing during shutdown
408 if (mem->xitTable.index >= 0 && !shutting_down)
409 Store::Root().transientsDisconnect(*mem);
410 if (mem->memCache.index >= 0 && !shutting_down)
411 Store::Root().memoryDisconnect(*this);
412
413 setMemStatus(NOT_IN_MEMORY);
414 mem_obj = NULL;
415 delete mem;
416 }
090089c4 417}
418
c8f4eac4 419void
528b2c61 420destroyStoreEntry(void *data)
090089c4 421{
59dbbc5c 422 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
c8f4eac4 423 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
9e975e4e 424 assert(e != NULL);
62e76326 425
e6ccf245 426 if (e == NullStoreEntry::getInstance())
62e76326 427 return;
428
fa6d2c65
AR
429 // Store::Root() is FATALly missing during shutdown
430 if (e->swap_filen >= 0 && !shutting_down) {
431 SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
432 sd.disconnect(*e);
433 }
434
3900307b 435 e->destroyMemObject();
62e76326 436
3900307b 437 e->hashDelete();
62e76326 438
332dafa2 439 assert(e->key == NULL);
62e76326 440
e6ccf245 441 delete e;
227fbb74 442}
090089c4 443
090089c4 444/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
445
f09f5b26 446void
3900307b 447StoreEntry::hashInsert(const cache_key * someKey)
090089c4 448{
4475555f 449 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
3900307b 450 key = storeKeyDup(someKey);
451 hash_join(store_table, this);
090089c4 452}
453
3900307b 454void
455StoreEntry::hashDelete()
090089c4 456{
cb868059
AR
457 if (key) { // some test cases do not create keys and do not hashInsert()
458 hash_remove_link(store_table, this);
459 storeKeyFree((const cache_key *)key);
460 key = NULL;
461 }
090089c4 462}
463
090089c4 464/* -------------------------------------------------------------------------- */
465
090089c4 466/* get rid of memory copy of the object */
d88e3c49 467void
468StoreEntry::purgeMem()
090089c4 469{
d88e3c49 470 if (mem_obj == NULL)
62e76326 471 return;
472
bf8fe701 473 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
62e76326 474
ce49546e 475 Store::Root().memoryUnlink(*this);
62e76326 476
d88e3c49 477 if (swap_status != SWAPOUT_DONE)
478 release();
090089c4 479}
480
6a566b9c 481void
18994992 482StoreEntry::lock(const char *context)
6a566b9c 483{
5db6bf73 484 ++lock_count;
18994992
AR
485 debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
486}
487
488void
9d4e9cfb
AR
489StoreEntry::touch()
490{
c21ad0f5 491 lastref = squid_curtime;
492 Store::Root().reference(*this);
090089c4 493}
494
43ae1d95 495void
496StoreEntry::setReleaseFlag()
497{
498 if (EBIT_TEST(flags, RELEASE_REQUEST))
499 return;
500
bf8fe701 501 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
43ae1d95 502
503 EBIT_SET(flags, RELEASE_REQUEST);
1bfe9ade
AR
504
505 Store::Root().markForUnlink(*this);
43ae1d95 506}
507
b8d8561b 508void
d88e3c49 509StoreEntry::releaseRequest()
2285407f 510{
d88e3c49 511 if (EBIT_TEST(flags, RELEASE_REQUEST))
62e76326 512 return;
513
6919be24 514 setReleaseFlag(); // makes validToSend() false, preventing future hits
62e76326 515
d88e3c49 516 setPrivateKey();
2285407f 517}
518
b8d8561b 519int
18994992 520StoreEntry::unlock(const char *context)
090089c4 521{
18994992
AR
522 debugs(20, 3, (context ? context : "somebody") <<
523 " unlocking key " << getMD5Text() << ' ' << *this);
c47b98ac 524 assert(lock_count > 0);
5e263176 525 --lock_count;
62e76326 526
c21ad0f5 527 if (lock_count)
528 return (int) lock_count;
62e76326 529
c21ad0f5 530 if (store_status == STORE_PENDING)
531 setReleaseFlag();
62e76326 532
c21ad0f5 533 assert(storePendingNClients(this) == 0);
62e76326 534
9199139f 535 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
5f33b71d 536 this->release();
984f9874
AR
537 return 0;
538 }
539
9487bae9 540 if (EBIT_TEST(flags, KEY_PRIVATE))
e0236918 541 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
62e76326 542
9487bae9 543 Store::Root().handleIdleEntry(*this); // may delete us
6c895381 544 return 0;
090089c4 545}
546
e6ccf245 547void
60745f24 548StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
e6ccf245 549{
550 assert (aClient);
3b13a8fd 551 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
62e76326 552
e6ccf245 553 if (!result)
62e76326 554 aClient->created (NullStoreEntry::getInstance());
e6ccf245 555 else
62e76326 556 aClient->created (result);
e6ccf245 557}
558
559void
190154cf 560StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
e6ccf245 561{
562 assert (aClient);
3b13a8fd 563 StoreEntry *result = storeGetPublicByRequest (request);
62e76326 564
e6ccf245 565 if (!result)
62e76326 566 result = NullStoreEntry::getInstance();
567
e6ccf245 568 aClient->created (result);
569}
570
571void
60745f24 572StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
e6ccf245 573{
574 assert (aClient);
3b13a8fd 575 StoreEntry *result = storeGetPublic (uri, method);
62e76326 576
e6ccf245 577 if (!result)
62e76326 578 result = NullStoreEntry::getInstance();
579
e6ccf245 580 aClient->created (result);
581}
582
08e5d64f 583StoreEntry *
60745f24 584storeGetPublic(const char *uri, const HttpRequestMethod& method)
08e5d64f 585{
c8f4eac4 586 return Store::Root().get(storeKeyPublic(uri, method));
08e5d64f 587}
588
f66a9ef4 589StoreEntry *
60745f24 590storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
f66a9ef4 591{
c8f4eac4 592 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
f66a9ef4 593}
594
595StoreEntry *
190154cf 596storeGetPublicByRequest(HttpRequest * req)
f66a9ef4 597{
598 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
62e76326 599
c2a7cefd 600 if (e == NULL && req->method == Http::METHOD_HEAD)
62e76326 601 /* We can generate a HEAD reply from a cached GET object */
c2a7cefd 602 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
62e76326 603
f66a9ef4 604 return e;
605}
606
007b8be4 607static int
608getKeyCounter(void)
04e8dbaa 609{
007b8be4 610 static int key_counter = 0;
62e76326 611
007b8be4 612 if (++key_counter < 0)
62e76326 613 key_counter = 1;
614
007b8be4 615 return key_counter;
04e8dbaa 616}
617
c8f4eac4 618/* RBC 20050104 AFAICT this should become simpler:
619 * rather than reinserting with a special key it should be marked
620 * as 'released' and then cleaned up when refcounting indicates.
621 * the StoreHashIndex could well implement its 'released' in the
622 * current manner.
623 * Also, clean log writing should skip over ia,t
624 * Otherwise, we need a 'remove from the index but not the store
625 * concept'.
626 */
6c57e268 627void
d88e3c49 628StoreEntry::setPrivateKey()
227fbb74 629{
9fb13bb6 630 const cache_key *newkey;
62e76326 631
d88e3c49 632 if (key && EBIT_TEST(flags, KEY_PRIVATE))
c21ad0f5 633 return; /* is already private */
62e76326 634
d88e3c49 635 if (key) {
6919be24
AR
636 setReleaseFlag(); // will markForUnlink(); all caches/workers will know
637
638 // TODO: move into SwapDir::markForUnlink() already called by Root()
d88e3c49 639 if (swap_filen > -1)
640 storeDirSwapLog(this, SWAP_LOG_DEL);
62e76326 641
3900307b 642 hashDelete();
b109de6b 643 }
62e76326 644
c877c0bc 645 if (mem_obj && mem_obj->hasUris()) {
d88e3c49 646 mem_obj->id = getKeyCounter();
c877c0bc 647 newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
9fb13bb6 648 } else {
c2a7cefd 649 newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
9fb13bb6 650 }
62e76326 651
9fb13bb6 652 assert(hash_lookup(store_table, newkey) == NULL);
d88e3c49 653 EBIT_SET(flags, KEY_PRIVATE);
3900307b 654 hashInsert(newkey);
227fbb74 655}
656
b8d8561b 657void
d88e3c49 658StoreEntry::setPublicKey()
227fbb74 659{
9fb13bb6 660 const cache_key *newkey;
62e76326 661
d88e3c49 662 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
c21ad0f5 663 return; /* is already public */
62e76326 664
d88e3c49 665 assert(mem_obj);
62e76326 666
f3e570e9 667 /*
668 * We can't make RELEASE_REQUEST objects public. Depending on
669 * when RELEASE_REQUEST gets set, we might not be swapping out
670 * the object. If we're not swapping out, then subsequent
671 * store clients won't be able to access object data which has
672 * been freed from memory.
d87ebd78 673 *
6919be24 674 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
f3e570e9 675 */
6a566b9c 676#if MORE_DEBUG_OUTPUT
62e76326 677
d88e3c49 678 if (EBIT_TEST(flags, RELEASE_REQUEST))
e0236918 679 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
62e76326 680
2b906e48 681#endif
62e76326 682
d88e3c49 683 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
62e76326 684
d88e3c49 685 if (mem_obj->request) {
686 HttpRequest *request = mem_obj->request;
62e76326 687
d88e3c49 688 if (!mem_obj->vary_headers) {
62e76326 689 /* First handle the case where the object no longer varies */
690 safe_free(request->vary_headers);
691 } else {
d88e3c49 692 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
62e76326 693 /* Oops.. the variance has changed. Kill the base object
694 * to record the new variance key
695 */
c21ad0f5 696 safe_free(request->vary_headers); /* free old "bad" variance key */
c877c0bc 697 if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
5f33b71d 698 pe->release();
62e76326 699 }
700
701 /* Make sure the request knows the variance status */
702 if (!request->vary_headers) {
d88e3c49 703 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
62e76326 704
705 if (vary)
706 request->vary_headers = xstrdup(vary);
707 }
708 }
709
9487bae9
AR
710 // TODO: storeGetPublic() calls below may create unlocked entries.
711 // We should add/use storeHas() API or lock/unlock those entries.
c877c0bc 712 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
62e76326 713 /* Create "vary" base object */
30abd221 714 String vary;
c877c0bc 715 StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
62e76326 716 /* We are allowed to do this typecast */
eab8dcfa 717 HttpReply *rep = new HttpReply;
955394ce 718 rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
d88e3c49 719 vary = mem_obj->getReply()->header.getList(HDR_VARY);
62e76326 720
30abd221 721 if (vary.size()) {
62e76326 722 /* Again, we own this structure layout */
b4f2886c 723 rep->header.putStr(HDR_VARY, vary.termedBuf());
30abd221 724 vary.clean();
62e76326 725 }
726
f66a9ef4 727#if X_ACCELERATOR_VARY
d88e3c49 728 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
62e76326 729
b38b26cb 730 if (vary.size() > 0) {
62e76326 731 /* Again, we own this structure layout */
b4197865 732 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
30abd221 733 vary.clean();
62e76326 734 }
735
f66a9ef4 736#endif
b8a899c0 737 pe->replaceHttpReply(rep, false); // no write until key is public
62e76326 738
3900307b 739 pe->timestampsSet();
ba27c9a0 740
eab8dcfa 741 pe->makePublic();
62e76326 742
b8a899c0
AR
743 pe->startWriting(); // after makePublic()
744
62e76326 745 pe->complete();
eab8dcfa 746
1bfe9ade 747 pe->unlock("StoreEntry::setPublicKey+Vary");
62e76326 748 }
749
d88e3c49 750 newkey = storeKeyPublicByRequest(mem_obj->request);
f66a9ef4 751 } else
c877c0bc 752 newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
62e76326 753
c877c0bc
AR
754 if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
755 debugs(20, 3, "Making old " << *e2 << " private.");
d88e3c49 756 e2->setPrivateKey();
5f33b71d 757 e2->release();
62e76326 758
d88e3c49 759 if (mem_obj->request)
760 newkey = storeKeyPublicByRequest(mem_obj->request);
62e76326 761 else
c877c0bc 762 newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
6eb42cae 763 }
62e76326 764
d88e3c49 765 if (key)
3900307b 766 hashDelete();
62e76326 767
d88e3c49 768 EBIT_CLR(flags, KEY_PRIVATE);
62e76326 769
3900307b 770 hashInsert(newkey);
62e76326 771
d88e3c49 772 if (swap_filen > -1)
773 storeDirSwapLog(this, SWAP_LOG_ADD);
227fbb74 774}
775
b8d8561b 776StoreEntry *
1bfe9ade 777storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
090089c4 778{
090089c4 779 StoreEntry *e = NULL;
bf8fe701 780 debugs(20, 3, "storeCreateEntry: '" << url << "'");
090089c4 781
c877c0bc 782 e = new StoreEntry();
c877c0bc
AR
783 e->makeMemObject();
784 e->mem_obj->setUris(url, log_url, method);
62e76326 785
45e5102d 786 if (flags.cachable) {
62e76326 787 EBIT_CLR(e->flags, RELEASE_REQUEST);
090089c4 788 } else {
d88e3c49 789 e->releaseRequest();
090089c4 790 }
62e76326 791
234967c9 792 e->store_status = STORE_PENDING;
090089c4 793 e->refcount = 0;
b8de7ebe 794 e->lastref = squid_curtime;
3900307b 795 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
30a4f2a8 796 e->ping_status = PING_NONE;
d46a87a8 797 EBIT_SET(e->flags, ENTRY_VALIDATED);
090089c4 798 return e;
799}
800
1bfe9ade
AR
801StoreEntry *
802storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
803{
804 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
805 e->lock("storeCreateEntry");
806
807 if (neighbors_do_private_keys || !flags.hierarchical)
808 e->setPrivateKey();
809 else
810 e->setPublicKey();
811
812 return e;
813}
814
6eb42cae 815/* Mark object as expired */
b8d8561b 816void
d88e3c49 817StoreEntry::expireNow()
9174e204 818{
bf8fe701 819 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
d88e3c49 820 expires = squid_curtime;
9174e204 821}
822
528b2c61 823void
824StoreEntry::write (StoreIOBuffer writeBuffer)
825{
826 assert(mem_obj != NULL);
528b2c61 827 /* This assert will change when we teach the store to update */
d2639a5b 828 PROF_start(StoreEntry_write);
528b2c61 829 assert(store_status == STORE_PENDING);
62e76326 830
55759ffb
AR
831 // XXX: caller uses content offset, but we also store headers
832 if (const HttpReply *reply = mem_obj->getReply())
833 writeBuffer.offset += reply->hdr_sz;
834
d2639a5b 835 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
836 PROF_stop(StoreEntry_write);
528b2c61 837 storeGetMemSpace(writeBuffer.length);
55759ffb
AR
838 mem_obj->write(writeBuffer);
839
840 if (!EBIT_TEST(flags, DELAY_SENDING))
841 invokeHandlers();
528b2c61 842}
843
c21ad0f5 844/* Append incoming data from a primary server to an entry. */
845void
846StoreEntry::append(char const *buf, int len)
847{
848 assert(mem_obj != NULL);
3a1c3e2f 849 assert(len >= 0);
c21ad0f5 850 assert(store_status == STORE_PENDING);
528b2c61 851
852 StoreIOBuffer tempBuffer;
853 tempBuffer.data = (char *)buf;
854 tempBuffer.length = len;
aa18a4ca 855 /*
856 * XXX sigh, offset might be < 0 here, but it gets "corrected"
857 * later. This offset crap is such a mess.
858 */
c21ad0f5 859 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
860 write(tempBuffer);
090089c4 861}
862
b8d8561b 863void
fe4e214f 864storeAppendPrintf(StoreEntry * e, const char *fmt,...)
15c05bb0 865{
62d32805 866 va_list args;
867 va_start(args, fmt);
62e76326 868
cb69b4c7 869 storeAppendVPrintf(e, fmt, args);
870 va_end(args);
871}
872
873/* used be storeAppendPrintf and Packer */
874void
875storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
876{
877 LOCAL_ARRAY(char, buf, 4096);
15c05bb0 878 buf[0] = '\0';
cb69b4c7 879 vsnprintf(buf, 4096, fmt, vargs);
3900307b 880 e->append(buf, strlen(buf));
c30c5a73 881}
882
26ac0430 883struct _store_check_cachable_hist {
62e76326 884
26ac0430 885 struct {
62e76326 886 int non_get;
887 int not_entry_cachable;
888 int wrong_content_length;
889 int negative_cached;
890 int too_big;
891 int too_small;
892 int private_key;
893 int too_many_open_files;
894 int too_many_open_fds;
2fadd50d 895 } no;
62e76326 896
26ac0430 897 struct {
62e76326 898 int Default;
2fadd50d
HN
899 } yes;
900} store_check_cachable_hist;
8423ff74 901
c47511fd 902int
903storeTooManyDiskFilesOpen(void)
904{
905 if (Config.max_open_disk_fds == 0)
62e76326 906 return 0;
907
83a29c95 908 if (store_open_disk_fd > Config.max_open_disk_fds)
62e76326 909 return 1;
910
c47511fd 911 return 0;
912}
913
3900307b 914int
915StoreEntry::checkTooSmall()
d20b1cd0 916{
3900307b 917 if (EBIT_TEST(flags, ENTRY_SPECIAL))
62e76326 918 return 0;
919
3900307b 920 if (STORE_OK == store_status)
41afe8b2 921 if (mem_obj->object_sz >= 0 &&
26ac0430 922 mem_obj->object_sz < Config.Store.minObjectSize)
62e76326 923 return 1;
3900307b 924 if (getReply()->content_length > -1)
47f6e231 925 if (getReply()->content_length < Config.Store.minObjectSize)
62e76326 926 return 1;
d20b1cd0 927 return 0;
928}
929
ddc9b32c 930// TODO: move "too many open..." checks outside -- we are called too early/late
7015a149 931bool
3900307b 932StoreEntry::checkCachable()
6602e70e 933{
97754f5a
AR
934 // XXX: This method is used for both memory and disk caches, but some
935 // checks are specific to disk caches. Move them to mayStartSwapOut().
936
937 // XXX: This method may be called several times, sometimes with different
938 // outcomes, making store_check_cachable_hist counters misleading.
939
940 // check this first to optimize handling of repeated calls for uncachables
941 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
942 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
943 ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
944 return 0; // avoid rerequesting release below
945 }
946
2ac237e2 947#if CACHE_ALL_METHODS
62e76326 948
c2a7cefd 949 if (mem_obj->method != Http::METHOD_GET) {
bf8fe701 950 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
5db6bf73 951 ++store_check_cachable_hist.no.non_get;
2ac237e2 952 } else
953#endif
3900307b 954 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
bf8fe701 955 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
5db6bf73 956 ++store_check_cachable_hist.no.wrong_content_length;
3900307b 957 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
bf8fe701 958 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
5db6bf73 959 ++store_check_cachable_hist.no.negative_cached;
c21ad0f5 960 return 0; /* avoid release call below */
3900307b 961 } else if ((getReply()->content_length > 0 &&
b51ec8c8
AJ
962 getReply()->content_length > store_maxobjsize) ||
963 mem_obj->endOffset() > store_maxobjsize) {
bf8fe701 964 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
5db6bf73 965 ++store_check_cachable_hist.no.too_big;
3900307b 966 } else if (checkTooSmall()) {
bf8fe701 967 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
5db6bf73 968 ++store_check_cachable_hist.no.too_small;
3900307b 969 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
bf8fe701 970 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
5db6bf73 971 ++store_check_cachable_hist.no.private_key;
3900307b 972 } else if (swap_status != SWAPOUT_NONE) {
62e76326 973 /*
974 * here we checked the swap_status because the remaining
975 * cases are only relevant only if we haven't started swapping
976 * out the object yet.
977 */
978 return 1;
979 } else if (storeTooManyDiskFilesOpen()) {
bf8fe701 980 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
5db6bf73 981 ++store_check_cachable_hist.no.too_many_open_files;
62e76326 982 } else if (fdNFree() < RESERVED_FD) {
bf8fe701 983 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
5db6bf73 984 ++store_check_cachable_hist.no.too_many_open_fds;
62e76326 985 } else {
5db6bf73 986 ++store_check_cachable_hist.yes.Default;
62e76326 987 return 1;
988 }
989
3900307b 990 releaseRequest();
6602e70e 991 return 0;
992}
993
3900307b 994void
995storeCheckCachableStats(StoreEntry *sentry)
8423ff74 996{
c40acff3 997 storeAppendPrintf(sentry, "Category\t Count\n");
998
a0cd8f99 999#if CACHE_ALL_METHODS
62e76326 1000
8423ff74 1001 storeAppendPrintf(sentry, "no.non_get\t%d\n",
62e76326 1002 store_check_cachable_hist.no.non_get);
a0cd8f99 1003#endif
62e76326 1004
8423ff74 1005 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
62e76326 1006 store_check_cachable_hist.no.not_entry_cachable);
8423ff74 1007 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
62e76326 1008 store_check_cachable_hist.no.wrong_content_length);
8423ff74 1009 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
62e76326 1010 store_check_cachable_hist.no.negative_cached);
8423ff74 1011 storeAppendPrintf(sentry, "no.too_big\t%d\n",
62e76326 1012 store_check_cachable_hist.no.too_big);
d20b1cd0 1013 storeAppendPrintf(sentry, "no.too_small\t%d\n",
62e76326 1014 store_check_cachable_hist.no.too_small);
8423ff74 1015 storeAppendPrintf(sentry, "no.private_key\t%d\n",
62e76326 1016 store_check_cachable_hist.no.private_key);
c5f627c2 1017 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
62e76326 1018 store_check_cachable_hist.no.too_many_open_files);
59ffcdf8 1019 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
62e76326 1020 store_check_cachable_hist.no.too_many_open_fds);
8423ff74 1021 storeAppendPrintf(sentry, "yes.default\t%d\n",
62e76326 1022 store_check_cachable_hist.yes.Default);
8423ff74 1023}
1024
b8d8561b 1025void
528b2c61 1026StoreEntry::complete()
090089c4 1027{
bf8fe701 1028 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
62e76326 1029
528b2c61 1030 if (store_status != STORE_PENDING) {
62e76326 1031 /*
1032 * if we're not STORE_PENDING, then probably we got aborted
1033 * and there should be NO clients on this entry
1034 */
1035 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1036 assert(mem_obj->nclients == 0);
1037 return;
b6403fac 1038 }
62e76326 1039
7d31d5fa 1040 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1041 * in use of object_sz?
1042 */
528b2c61 1043 mem_obj->object_sz = mem_obj->endOffset();
7d31d5fa 1044
528b2c61 1045 store_status = STORE_OK;
7d31d5fa 1046
528b2c61 1047 assert(mem_status == NOT_IN_MEMORY);
62e76326 1048
528b2c61 1049 if (!validLength()) {
62e76326 1050 EBIT_SET(flags, ENTRY_BAD_LENGTH);
d88e3c49 1051 releaseRequest();
41587298 1052 }
62e76326 1053
6cfa8966 1054#if USE_CACHE_DIGESTS
528b2c61 1055 if (mem_obj->request)
62e76326 1056 mem_obj->request->hier.store_complete_stop = current_time;
1057
39edba21 1058#endif
d20b1cd0 1059 /*
d88e3c49 1060 * We used to call invokeHandlers, then storeSwapOut. However,
d20b1cd0 1061 * Madhukar Reddy <myreddy@persistence.com> reported that
1062 * responses without content length would sometimes get released
1063 * in client_side, thinking that the response is incomplete.
1064 */
d88e3c49 1065 invokeHandlers();
7e3e1d01 1066}
1067
090089c4 1068/*
474cac1b 1069 * Someone wants to abort this transfer. Set the reason in the
1070 * request structure, call the server-side callback and mark the
2b906e48 1071 * entry for releasing
090089c4 1072 */
b8d8561b 1073void
bfb55b6f 1074StoreEntry::abort()
090089c4 1075{
5db6bf73 1076 ++statCounter.aborted_requests;
bfb55b6f 1077 assert(store_status == STORE_PENDING);
1078 assert(mem_obj != NULL);
bf8fe701 1079 debugs(20, 6, "storeAbort: " << getMD5Text());
34266cde 1080
1bfe9ade 1081 lock("StoreEntry::abort"); /* lock while aborting */
d88e3c49 1082 negativeCache();
34266cde 1083
d88e3c49 1084 releaseRequest();
34266cde 1085
bfb55b6f 1086 EBIT_SET(flags, ENTRY_ABORTED);
34266cde 1087
3900307b 1088 setMemStatus(NOT_IN_MEMORY);
34266cde 1089
bfb55b6f 1090 store_status = STORE_OK;
34266cde 1091
474cac1b 1092 /* Notify the server side */
62e76326 1093
8ea67c2b 1094 /*
1095 * DPW 2007-05-07
1096 * Should we check abort.data for validity?
1097 */
bfb55b6f 1098 if (mem_obj->abort.callback) {
26ac0430 1099 if (!cbdataReferenceValid(mem_obj->abort.data))
e0236918 1100 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
bfb55b6f 1101 eventAdd("mem_obj->abort.callback",
1102 mem_obj->abort.callback,
1103 mem_obj->abort.data,
62e76326 1104 0.0,
8ea67c2b 1105 true);
26ac0430 1106 unregisterAbort();
bfcaf585 1107 }
62e76326 1108
1109 /* XXX Should we reverse these two, so that there is no
26ac0430 1110 * unneeded disk swapping triggered?
528b2c61 1111 */
474cac1b 1112 /* Notify the client side */
d88e3c49 1113 invokeHandlers();
62e76326 1114
aa1a691e
AR
1115 // abort swap out, invalidating what was created so far (release follows)
1116 swapOutFileClose(StoreIOState::writerGone);
62e76326 1117
1bfe9ade 1118 unlock("StoreEntry::abort"); /* unlock */
090089c4 1119}
1120
6d3c2758
HN
1121/**
1122 * Clear Memory storage to accommodate the given object len
1123 */
1124void
d4432957 1125storeGetMemSpace(int size)
090089c4 1126{
1d5161bd 1127 PROF_start(storeGetMemSpace);
b32508fb 1128 StoreEntry *e = NULL;
20cba4b4 1129 int released = 0;
b32508fb 1130 static time_t last_check = 0;
528b2c61 1131 size_t pages_needed;
6a566b9c 1132 RemovalPurgeWalker *walker;
62e76326 1133
1d5161bd 1134 if (squid_curtime == last_check) {
1135 PROF_stop(storeGetMemSpace);
62e76326 1136 return;
1d5161bd 1137 }
62e76326 1138
b32508fb 1139 last_check = squid_curtime;
62e76326 1140
2ad51840 1141 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
62e76326 1142
1d5161bd 1143 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1144 PROF_stop(storeGetMemSpace);
62e76326 1145 return;
1d5161bd 1146 }
62e76326 1147
e4049756 1148 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1149 " pages");
62e76326 1150
6a566b9c 1151 /* XXX what to set as max_scan here? */
1152 walker = mem_policy->PurgeInit(mem_policy, 100000);
62e76326 1153
c1dd71ae 1154 while ((e = walker->Next(walker))) {
d88e3c49 1155 e->purgeMem();
5db6bf73 1156 ++released;
62e76326 1157
1158 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1159 break;
8350fe9b 1160 }
62e76326 1161
6a566b9c 1162 walker->Done(walker);
bf8fe701 1163 debugs(20, 3, "storeGetMemSpace stats:");
1164 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1165 debugs(20, 3, " " << std::setw(6) << released << " were released");
1d5161bd 1166 PROF_stop(storeGetMemSpace);
090089c4 1167}
1168
c8f4eac4 1169/* thunk through to Store::Root().maintain(). Note that this would be better still
26ac0430
AJ
1170 * if registered against the root store itself, but that requires more complex
1171 * update logic - bigger fish to fry first. Long term each store when
c8f4eac4 1172 * it becomes active will self register
1173 */
1174void
1175Store::Maintain(void *notused)
1176{
1177 Store::Root().maintain();
1178
1179 /* Reregister a maintain event .. */
1180 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1181
1182}
1183
090089c4 1184/* The maximum objects to scan for maintain storage space */
c21ad0f5 1185#define MAINTAIN_MAX_SCAN 1024
1186#define MAINTAIN_MAX_REMOVE 64
090089c4 1187
2b906e48 1188/*
fcefe642 1189 * This routine is to be called by main loop in main.c.
1190 * It removes expired objects on only one bucket for each time called.
fcefe642 1191 *
1192 * This should get called 1/s from main().
1193 */
679ac4f0 1194void
c8f4eac4 1195StoreController::maintain()
090089c4 1196{
6a566b9c 1197 static time_t last_warn_time = 0;
cd748f27 1198
88bfe092 1199 PROF_start(storeMaintainSwapSpace);
c8f4eac4 1200 swapDir->maintain();
62e76326 1201
c8f4eac4 1202 /* this should be emitted by the oversize dir, not globally */
62e76326 1203
39c1e1d9 1204 if (Store::Root().currentSize() > Store::Root().maxSize()) {
62e76326 1205 if (squid_curtime - last_warn_time > 10) {
c575fb6a 1206 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
cc34568d
DK
1207 << Store::Root().currentSize() / 1024.0 << " KB > "
1208 << (Store::Root().maxSize() >> 10) << " KB");
62e76326 1209 last_warn_time = squid_curtime;
1210 }
6a566b9c 1211 }
62e76326 1212
88bfe092 1213 PROF_stop(storeMaintainSwapSpace);
090089c4 1214}
1215
090089c4 1216/* release an object from a cache */
6c78a099 1217void
5f33b71d 1218StoreEntry::release()
090089c4 1219{
88bfe092 1220 PROF_start(storeRelease);
1bfe9ade 1221 debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
090089c4 1222 /* If, for any reason we can't discard this object because of an
1223 * outstanding request, mark it for pending release */
62e76326 1224
3900307b 1225 if (locked()) {
d88e3c49 1226 expireNow();
bf8fe701 1227 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
d88e3c49 1228 releaseRequest();
62e76326 1229 PROF_stop(storeRelease);
1230 return;
090089c4 1231 }
62e76326 1232
ce49546e
AR
1233 Store::Root().memoryUnlink(*this);
1234
bef81ea5 1235 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
d88e3c49 1236 setPrivateKey();
62e76326 1237
5f33b71d 1238 if (swap_filen > -1) {
acc5dc4c
AR
1239 // lock the entry until rebuilding is done
1240 lock("storeLateRelease");
5f33b71d 1241 setReleaseFlag();
cfb88efb 1242 LateReleaseStack.push(this);
62e76326 1243 } else {
5f33b71d 1244 destroyStoreEntry(static_cast<hash_link *>(this));
22c25cbb 1245 // "this" is no longer valid
62e76326 1246 }
22c25cbb
AR
1247
1248 PROF_stop(storeRelease);
1249 return;
43d9cf56 1250 }
62e76326 1251
5f33b71d 1252 storeLog(STORE_LOG_RELEASE, this);
62e76326 1253
5f33b71d 1254 if (swap_filen > -1) {
f58bb2f4 1255 // log before unlink() below clears swap_filen
5f33b71d 1256 if (!EBIT_TEST(flags, KEY_PRIVATE))
1257 storeDirSwapLog(this, SWAP_LOG_DEL);
62e76326 1258
f58bb2f4 1259 unlink();
090089c4 1260 }
62e76326 1261
5f33b71d 1262 destroyStoreEntry(static_cast<hash_link *>(this));
88bfe092 1263 PROF_stop(storeRelease);
090089c4 1264}
1265
e42d5181 1266static void
1267storeLateRelease(void *unused)
1268{
1269 StoreEntry *e;
e42d5181 1270 static int n = 0;
62e76326 1271
bef81ea5 1272 if (StoreController::store_dirs_rebuilding) {
62e76326 1273 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1274 return;
e42d5181 1275 }
62e76326 1276
cfb88efb
AR
1277 // TODO: this works but looks unelegant.
1278 for (int i = 0; i < 10; ++i) {
1279 if (LateReleaseStack.empty()) {
24b14da9
FC
1280 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1281 return;
cfb88efb
AR
1282 } else {
1283 e = LateReleaseStack.top();
1284 LateReleaseStack.pop();
3aa53107 1285 }
62e76326 1286
1bfe9ade 1287 e->unlock("storeLateRelease");
5db6bf73 1288 ++n;
e42d5181 1289 }
62e76326 1290
e42d5181 1291 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1292}
1293
090089c4 1294/* return 1 if a store entry is locked */
cd748f27 1295int
3900307b 1296StoreEntry::locked() const
090089c4 1297{
3900307b 1298 if (lock_count)
62e76326 1299 return 1;
1300
b8890359 1301 /*
1bfe9ade
AR
1302 * SPECIAL, PUBLIC entries should be "locked";
1303 * XXX: Their owner should lock them then instead of relying on this hack.
b8890359 1304 */
3900307b 1305 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1306 if (!EBIT_TEST(flags, KEY_PRIVATE))
62e76326 1307 return 1;
1308
30a4f2a8 1309 return 0;
090089c4 1310}
1311
528b2c61 1312bool
1313StoreEntry::validLength() const
6602e70e 1314{
47f6e231 1315 int64_t diff;
d8b249ef 1316 const HttpReply *reply;
528b2c61 1317 assert(mem_obj != NULL);
1318 reply = getReply();
bf8fe701 1319 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
e4049756 1320 debugs(20, 5, "storeEntryValidLength: object_len = " <<
707fdc47 1321 objectLen());
bf8fe701 1322 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1323 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
62e76326 1324
d8b249ef 1325 if (reply->content_length < 0) {
bf8fe701 1326 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
62e76326 1327 return 1;
ffe4a367 1328 }
62e76326 1329
07304bf9 1330 if (reply->hdr_sz == 0) {
bf8fe701 1331 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
62e76326 1332 return 1;
ffe4a367 1333 }
62e76326 1334
c2a7cefd 1335 if (mem_obj->method == Http::METHOD_HEAD) {
bf8fe701 1336 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
62e76326 1337 return 1;
ffe4a367 1338 }
62e76326 1339
9b769c67 1340 if (reply->sline.status() == Http::scNotModified)
62e76326 1341 return 1;
1342
9b769c67 1343 if (reply->sline.status() == Http::scNoContent)
62e76326 1344 return 1;
1345
707fdc47 1346 diff = reply->hdr_sz + reply->content_length - objectLen();
62e76326 1347
ebf4efff 1348 if (diff == 0)
62e76326 1349 return 1;
1350
bf8fe701 1351 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
62e76326 1352
ebf4efff 1353 return 0;
ffe4a367 1354}
6602e70e 1355
6b7d87bb
FC
1356static void
1357storeRegisterWithCacheManager(void)
1358{
8822ebee
AR
1359 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1360 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1361 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
d9fc6862 1362 storeCheckCachableStats, 0, 1);
6b7d87bb
FC
1363}
1364
b8d8561b 1365void
1366storeInit(void)
c943f331 1367{
25535cbe 1368 storeKeyInit();
6a566b9c 1369 mem_policy = createRemovalPolicy(Config.memPolicy);
8638fc66 1370 storeDigestInit();
e3ef2b09 1371 storeLogOpen();
e42d5181 1372 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
c8f4eac4 1373 Store::Root().init();
b2c141d4 1374 storeRebuildStart();
d120ed12
FC
1375
1376 storeRegisterWithCacheManager();
62ee09ca 1377}
1378
3c856e95
AR
1379/// computes maximum size of a cachable object
1380/// larger objects are rejected by all (disk and memory) cache stores
1381static int64_t
1382storeCalcMaxObjSize()
1383{
1384 int64_t ms = 0; // nothing can be cached without at least one store consent
1385
1386 // global maximum is at least the disk store maximum
1387 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1388 assert (Config.cacheSwap.swapDirs[i].getRaw());
1389 const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
1390 if (ms < storeMax)
1391 ms = storeMax;
1392 }
1393
1394 // global maximum is at least the memory store maximum
1395 // TODO: move this into a memory cache class when we have one
1396 const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
1397 if (ms < memMax)
1398 ms = memMax;
1399
1400 return ms;
1401}
1402
b8d8561b 1403void
1404storeConfigure(void)
b1c0cc67 1405{
c8f4eac4 1406 store_swap_high = (long) (((float) Store::Root().maxSize() *
62e76326 1407 (float) Config.Swap.highWaterMark) / (float) 100);
c8f4eac4 1408 store_swap_low = (long) (((float) Store::Root().maxSize() *
62e76326 1409 (float) Config.Swap.lowWaterMark) / (float) 100);
505d4821 1410 store_pages_max = Config.memMaxSize / sizeof(mem_node);
3c856e95
AR
1411
1412 store_maxobjsize = storeCalcMaxObjSize();
090089c4 1413}
1414
9487bae9 1415bool
97754f5a 1416StoreEntry::memoryCachable()
56f29785 1417{
97754f5a
AR
1418 if (!checkCachable())
1419 return 0;
1420
3900307b 1421 if (mem_obj == NULL)
62e76326 1422 return 0;
1423
3900307b 1424 if (mem_obj->data_hdr.size() == 0)
62e76326 1425 return 0;
1426
19fdd3f3 1427 if (mem_obj->inmem_lo != 0)
e1381638 1428 return 0;
19fdd3f3 1429
d227e181 1430 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
e1381638 1431 return 0;
19fdd3f3
HN
1432
1433 return 1;
56f29785 1434}
1435
edce4d98 1436int
3900307b 1437StoreEntry::checkNegativeHit() const
edce4d98 1438{
3900307b 1439 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
62e76326 1440 return 0;
1441
3900307b 1442 if (expires <= squid_curtime)
62e76326 1443 return 0;
1444
3900307b 1445 if (store_status != STORE_OK)
62e76326 1446 return 0;
1447
edce4d98 1448 return 1;
1449}
1450
ac9cc053
AJ
1451/**
1452 * Set object for negative caching.
1453 * Preserves any expiry information given by the server.
1454 * In absence of proper expiry info it will set to expire immediately,
1455 * or with HTTP-violations enabled the configured negative-TTL is observed
1456 */
b8d8561b 1457void
d88e3c49 1458StoreEntry::negativeCache()
79b5cc5f 1459{
da03a7e0
AJ
1460 // XXX: should make the default for expires 0 instead of -1
1461 // so we can distinguish "Expires: -1" from nothing.
1462 if (expires <= 0)
626096be 1463#if USE_HTTP_VIOLATIONS
ac9cc053
AJ
1464 expires = squid_curtime + Config.negativeTtl;
1465#else
1466 expires = squid_curtime;
1467#endif
d88e3c49 1468 EBIT_SET(flags, ENTRY_NEGCACHED);
79b5cc5f 1469}
0a21bd84 1470
1471void
1472storeFreeMemory(void)
1473{
c8f4eac4 1474 Store::Root(NULL);
9bc73deb 1475#if USE_CACHE_DIGESTS
62e76326 1476
8638fc66 1477 if (store_digest)
62e76326 1478 cacheDigestDestroy(store_digest);
1479
c68e9c6b 1480#endif
62e76326 1481
8638fc66 1482 store_digest = NULL;
0a21bd84 1483}
a7e59001 1484
1485int
1486expiresMoreThan(time_t expires, time_t when)
1487{
c21ad0f5 1488 if (expires < 0) /* No Expires given */
62e76326 1489 return 1;
1490
48f44632 1491 return (expires > (squid_curtime + when));
a7e59001 1492}
fe54d06d 1493
1494int
3900307b 1495StoreEntry::validToSend() const
fe54d06d 1496{
3900307b 1497 if (EBIT_TEST(flags, RELEASE_REQUEST))
62e76326 1498 return 0;
1499
3900307b 1500 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1501 if (expires <= squid_curtime)
62e76326 1502 return 0;
1503
3900307b 1504 if (EBIT_TEST(flags, ENTRY_ABORTED))
62e76326 1505 return 0;
1506
22696a16
AR
1507 // now check that the entry has a cache backing or is collapsed
1508 if (swap_filen > -1) // backed by a disk cache
1509 return 1;
1510
1511 if (swappingOut()) // will be backed by a disk cache
1512 return 1;
1513
1514 if (!mem_obj) // not backed by a memory cache and not collapsed
1515 return 0;
1516
22696a16 1517 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
a4b04ff8
AR
1518 // disk cache backing that store_client constructor will assert. XXX: This
1519 // is wrong for range requests (that could feed off nibbled memory) and for
1520 // entries backed by the shared memory cache (that could, in theory, get
1521 // nibbled bytes from that cache, but there is no such "memoryIn" code).
1522 if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
22696a16
AR
1523 return 0;
1524
a4b04ff8
AR
1525 // The following check is correct but useless at this position. TODO: Move
1526 // it up when the shared memory cache can either replenish locally nibbled
1527 // bytes or, better, does not use local RAM copy at all.
1528 // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
1529 // return 1;
1530
fe54d06d 1531 return 1;
1532}
62663274 1533
ca98227c 1534void
3900307b 1535StoreEntry::timestampsSet()
ca98227c 1536{
3900307b 1537 const HttpReply *reply = getReply();
2f58241d 1538 time_t served_date = reply->date;
a9925b40 1539 int age = reply->header.getInt(HDR_AGE);
31d36bfd 1540 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
2f58241d 1541 /* make sure that 0 <= served_date <= squid_curtime */
62e76326 1542
2f58241d 1543 if (served_date < 0 || served_date > squid_curtime)
62e76326 1544 served_date = squid_curtime;
1545
525bf9dc
BD
1546 /* Bug 1791:
1547 * If the returned Date: is more than 24 hours older than
1548 * the squid_curtime, then one of us needs to use NTP to set our
1549 * clock. We'll pretend that our clock is right.
1550 */
1551 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1552 served_date = squid_curtime;
62e76326 1553
efd900cb 1554 /*
212cbb48 1555 * Compensate with Age header if origin server clock is ahead
1556 * of us and there is a cache in between us and the origin
1557 * server. But DONT compensate if the age value is larger than
1558 * squid_curtime because it results in a negative served_date.
efd900cb 1559 */
1560 if (age > squid_curtime - served_date)
62e76326 1561 if (squid_curtime > age)
1562 served_date = squid_curtime - age;
1563
31d36bfd
AR
1564 // compensate for Squid-to-server and server-to-Squid delays
1565 if (mem_obj && mem_obj->request) {
1566 const time_t request_sent =
1567 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1568 if (0 < request_sent && request_sent < squid_curtime)
1569 served_date -= (squid_curtime - request_sent);
1570 }
1571
0d465a25 1572 if (reply->expires > 0 && reply->date > -1)
26ac0430 1573 expires = served_date + (reply->expires - reply->date);
0d465a25 1574 else
26ac0430 1575 expires = reply->expires;
62e76326 1576
3900307b 1577 lastmod = reply->last_modified;
62e76326 1578
3900307b 1579 timestamp = served_date;
ca98227c 1580}
429fdbec 1581
bfcaf585 1582void
3900307b 1583StoreEntry::registerAbort(STABH * cb, void *data)
bfcaf585 1584{
3900307b 1585 assert(mem_obj);
1586 assert(mem_obj->abort.callback == NULL);
1587 mem_obj->abort.callback = cb;
8ea67c2b 1588 mem_obj->abort.data = cbdataReference(data);
bfcaf585 1589}
1590
1591void
3900307b 1592StoreEntry::unregisterAbort()
bfcaf585 1593{
3900307b 1594 assert(mem_obj);
8ea67c2b 1595 if (mem_obj->abort.callback) {
26ac0430
AJ
1596 mem_obj->abort.callback = NULL;
1597 cbdataReferenceDone(mem_obj->abort.data);
8ea67c2b 1598 }
bfcaf585 1599}
88738790 1600
f09f5b26 1601void
3900307b 1602StoreEntry::dump(int l) const
1603{
bf8fe701 1604 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1605 debugs(20, l, "StoreEntry->next: " << next);
1606 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
4a7a3d56 1607 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1608 debugs(20, l, "StoreEntry->lastref: " << lastref);
1609 debugs(20, l, "StoreEntry->expires: " << expires);
1610 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1611 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
bf8fe701 1612 debugs(20, l, "StoreEntry->refcount: " << refcount);
1613 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
4a7a3d56 1614 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1615 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1616 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1617 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1618 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1619 debugs(20, l, "StoreEntry->store_status: " << store_status);
1620 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
d377699f 1621}
1622
1f38f50a 1623/*
1624 * NOTE, this function assumes only two mem states
1625 */
f09f5b26 1626void
3900307b 1627StoreEntry::setMemStatus(mem_status_t new_status)
8350fe9b 1628{
3900307b 1629 if (new_status == mem_status)
62e76326 1630 return;
1631
6ebe9a4c
AR
1632 // are we using a shared memory cache?
1633 if (Config.memShared && IamWorkerProcess()) {
9487bae9
AR
1634 // This method was designed to update replacement policy, not to
1635 // actually purge something from the memory cache (TODO: rename?).
1636 // Shared memory cache does not have a policy that needs updates.
1637 mem_status = new_status;
1638 return;
1639 }
1640
3900307b 1641 assert(mem_obj != NULL);
62e76326 1642
b93bcace 1643 if (new_status == IN_MEMORY) {
3900307b 1644 assert(mem_obj->inmem_lo == 0);
62e76326 1645
3900307b 1646 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
c877c0bc 1647 debugs(20, 4, "not inserting special " << *this << " into policy");
62e76326 1648 } else {
3900307b 1649 mem_policy->Add(mem_policy, this, &mem_obj->repl);
c877c0bc 1650 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
62e76326 1651 }
1652
5db6bf73 1653 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
b93bcace 1654 } else {
3900307b 1655 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
c877c0bc 1656 debugs(20, 4, "not removing special " << *this << " from policy");
62e76326 1657 } else {
3900307b 1658 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
c877c0bc 1659 debugs(20, 4, "removed " << *this);
62e76326 1660 }
1661
5e263176 1662 --hot_obj_count;
b93bcace 1663 }
62e76326 1664
3900307b 1665 mem_status = new_status;
8350fe9b 1666}
6e86c3e8 1667
9fb13bb6 1668const char *
3900307b 1669StoreEntry::url() const
9fb13bb6 1670{
3900307b 1671 if (this == NULL)
62e76326 1672 return "[null_entry]";
3900307b 1673 else if (mem_obj == NULL)
62e76326 1674 return "[null_mem_obj]";
9fb13bb6 1675 else
c877c0bc 1676 return mem_obj->storeId();
9fb13bb6 1677}
24ffafb4 1678
c877c0bc
AR
1679MemObject *
1680StoreEntry::makeMemObject()
24ffafb4 1681{
c877c0bc
AR
1682 if (!mem_obj)
1683 mem_obj = new MemObject();
1684 return mem_obj;
1685}
9487bae9 1686
c877c0bc
AR
1687void
1688StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
1689{
1690 makeMemObject();
1691 mem_obj->setUris(aUrl, aLogUrl, aMethod);
c21ad0f5 1692}
1693
1694/* this just sets DELAY_SENDING */
1695void
1696StoreEntry::buffer()
1697{
1698 EBIT_SET(flags, DELAY_SENDING);
1699}
1700
438fc1e3 1701/* this just clears DELAY_SENDING and Invokes the handlers */
1702void
c21ad0f5 1703StoreEntry::flush()
438fc1e3 1704{
c21ad0f5 1705 if (EBIT_TEST(flags, DELAY_SENDING)) {
1706 EBIT_CLR(flags, DELAY_SENDING);
d88e3c49 1707 invokeHandlers();
b66315e4 1708 }
25535cbe 1709}
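/*
 * A minimal usage sketch of the buffer()/flush() pairing above, assuming a
 * caller that holds a pending StoreEntry and uses the append() method defined
 * earlier in this file; the helper name and arguments are invented.
 * storeErrorResponse() below brackets replaceHttpReply() the same way.
 */
#if 0
static void
appendQuietly(StoreEntry *e, const char *buf, int len)
{
    e->buffer();          /* set DELAY_SENDING: do not wake store clients yet */
    e->append(buf, len);  /* possibly several appends here */
    e->flush();           /* clear DELAY_SENDING and invoke the handlers once */
}
#endif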
07304bf9 1710
47f6e231 1711int64_t
707fdc47 1712StoreEntry::objectLen() const
07304bf9 1713{
707fdc47 1714 assert(mem_obj != NULL);
1715 return mem_obj->object_sz;
07304bf9 1716}
1717
47f6e231 1718int64_t
b37bde1e 1719StoreEntry::contentLen() const
07304bf9 1720{
b37bde1e 1721 assert(mem_obj != NULL);
1722 assert(getReply() != NULL);
1723 return objectLen() - getReply()->hdr_sz;
07304bf9 1724}
f3986a15 1725
528b2c61 1726HttpReply const *
1727StoreEntry::getReply () const
f3986a15 1728{
528b2c61 1729 if (NULL == mem_obj)
62e76326 1730 return NULL;
1731
528b2c61 1732 return mem_obj->getReply();
f3986a15 1733}
db1cd23c 1734
1735void
3900307b 1736StoreEntry::reset()
db1cd23c 1737{
3900307b 1738 assert (mem_obj);
bf8fe701 1739 debugs(20, 3, "StoreEntry::reset: " << url());
3900307b 1740 mem_obj->reset();
1741 HttpReply *rep = (HttpReply *) getReply(); // bypass const
06a5ae20 1742 rep->reset();
3900307b 1743 expires = lastmod = timestamp = -1;
db1cd23c 1744}
2b906e48 1745
cd748f27 1746/*
1747 * storeFsInit
1748 *
1749 * This routine calls the SETUP routine for each fs type.
1750 * I don't know where the best place for this is, and I'm not going to shuffle
1751 * around large chunks of code right now (that can be done once its working.)
1752 */
1753void
1754storeFsInit(void)
1755{
22d38e05 1756 storeReplSetup();
cd748f27 1757}
1758
22d38e05 1759/*
1760 * called to add another store removal policy module
1761 */
1762void
a2c963ae 1763storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
22d38e05 1764{
1765 int i;
62e76326 1766
d64c1498 1767 /* find the number of currently known repl types */
5db6bf73 1768 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
d64c1498 1769 if (strcmp(storerepl_list[i].typestr, type) == 0) {
e0236918 1770 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
d64c1498
AJ
1771 return;
1772 }
22d38e05 1773 }
62e76326 1774
22d38e05 1775 /* add the new type */
e6ccf245 1776 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
62e76326 1777
22d38e05 1778 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
62e76326 1779
22d38e05 1780 storerepl_list[i].typestr = type;
62e76326 1781
22d38e05 1782 storerepl_list[i].create = create;
1783}
1784
1785/*
1786 * Create a removal policy instance
1787 */
1788RemovalPolicy *
1789createRemovalPolicy(RemovalPolicySettings * settings)
1790{
1791 storerepl_entry_t *r;
62e76326 1792
5db6bf73 1793 for (r = storerepl_list; r && r->typestr; ++r) {
62e76326 1794 if (strcmp(r->typestr, settings->type) == 0)
1795 return r->create(settings->args);
22d38e05 1796 }
62e76326 1797
e0236918
FC
1798 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1799 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1800 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
0c5ccf11 1801 fatalf("ERROR: Unknown policy %s\n", settings->type);
c21ad0f5 1802 return NULL; /* NOTREACHED */
22d38e05 1803}
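/*
 * A sketch of how a removal policy module plugs into the registry above,
 * assuming a hypothetical "myLRU" policy; storeReplMyLruSetup and
 * createMyLruPolicy are invented names and the factory body is elided.
 * The setup function would be reached from storeReplSetup(), after which
 * "myLRU" becomes usable in cache_replacement_policy and
 * memory_replacement_policy and is instantiated by createRemovalPolicy().
 */
#if 0
static RemovalPolicy *createMyLruPolicy(wordlist *args); /* REMOVALPOLICYCREATE-compatible factory */

void
storeReplMyLruSetup(void)
{
    storeReplAdd("myLRU", createMyLruPolicy);
}
#endif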
1804
cd748f27 1805#if 0
fc8b9fc0 1806void
1807storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1808{
1809 if (e->swap_file_number == filn)
62e76326 1810 return;
1811
fc8b9fc0 1812 if (filn < 0) {
62e76326 1813 assert(-1 == filn);
1814 storeDirMapBitReset(e->swap_file_number);
1815 storeDirLRUDelete(e);
1816 e->swap_file_number = -1;
fc8b9fc0 1817 } else {
62e76326 1818 assert(-1 == e->swap_file_number);
1819 storeDirMapBitSet(e->swap_file_number = filn);
1820 storeDirLRUAdd(e);
fc8b9fc0 1821 }
1822}
62e76326 1823
cd748f27 1824#endif
e6ccf245 1825
eacfca83
AR
1826void
1827StoreEntry::storeErrorResponse(HttpReply *reply)
1828{
1829 lock("StoreEntry::storeErrorResponse");
1830 buffer();
1831 replaceHttpReply(reply);
1832 flush();
1833 complete();
1834 negativeCache();
1835 releaseRequest();
1836 unlock("StoreEntry::storeErrorResponse");
1837}
1838
db237875 1839/*
1840 * Replace a store entry's reply with
528b2c61 1841 * a new one. This eats (takes ownership of) the reply.
1842 */
4a56ee8d 1843void
3756e5c0 1844StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
4a56ee8d 1845{
bf8fe701 1846 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
62e76326 1847
4a56ee8d 1848 if (!mem_obj) {
fa84c01d 1849 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
62e76326 1850 return;
528b2c61 1851 }
62e76326 1852
4a56ee8d 1853 mem_obj->replaceHttpReply(rep);
1854
3756e5c0
AR
1855 if (andStartWriting)
1856 startWriting();
1857}
1858
3756e5c0
AR
1859void
1860StoreEntry::startWriting()
1861{
1862 Packer p;
1863
528b2c61 1864 /* TODO: when we store headers separately, remove the header portion */
1865 /* TODO: mark the length of the headers ? */
1866 /* We ONLY want the headers */
4a56ee8d 1867 packerToStoreInit(&p, this);
62e76326 1868
4a56ee8d 1869 assert (isEmpty());
3756e5c0 1870 assert(mem_obj);
9199139f 1871
3756e5c0
AR
1872 const HttpReply *rep = getReply();
1873 assert(rep);
62e76326 1874
3756e5c0
AR
1875 rep->packHeadersInto(&p);
1876 mem_obj->markEndOfReplyHeaders();
4475555f 1877 EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
62e76326 1878
0521f8be 1879 rep->body.packInto(&p);
62e76326 1880
528b2c61 1881 packerClean(&p);
1882}
62e76326 1883
528b2c61 1884char const *
1885StoreEntry::getSerialisedMetaData()
1886{
1887 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1888 int swap_hdr_sz;
1889 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1890 storeSwapTLVFree(tlv_list);
aa1a691e
AR
1891 assert (swap_hdr_sz >= 0);
1892 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
528b2c61 1893 return result;
1894}
1895
0cdcf3d7
AR
1896/**
1897 * Abandon the transient entry our worker has created if neither the shared
1898 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1899 * any, should notice and use Plan B instead of getting stuck waiting for us
1900 * to start swapping the entry out.
1901 */
1902void
2da4bfe6
A
1903StoreEntry::transientsAbandonmentCheck()
1904{
0cdcf3d7 1905 if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
2da4bfe6
A
1906 mem_obj->xitTable.index >= 0 && // other workers may be interested
1907 mem_obj->memCache.index < 0 && // rejected by the shared memory cache
1908 mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
0cdcf3d7
AR
1909 debugs(20, 7, "cannot be shared: " << *this);
1910 if (!shutting_down) // Store::Root() is FATALly missing during shutdown
1911 Store::Root().transientsAbandon(*this);
1912 }
1913}
1914
1915void
2da4bfe6
A
1916StoreEntry::memOutDecision(const bool willCacheInRam)
1917{
0cdcf3d7
AR
1918 transientsAbandonmentCheck();
1919}
1920
1921void
2da4bfe6
A
1922StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
1923{
0cdcf3d7
AR
1924 // Abandon our transient entry if neither shared memory nor disk wants it.
1925 assert(mem_obj);
1926 mem_obj->swapout.decision = decision;
1927 transientsAbandonmentCheck();
1928}
1929
528b2c61 1930void
5b55f1f1 1931StoreEntry::trimMemory(const bool preserveSwappable)
528b2c61 1932{
7fef2365 1933 /*
1934 * DPW 2007-05-09
1935 * Bug #1943. We must not let go any data for IN_MEMORY
1936 * objects. We have to wait until the mem_status changes.
1937 */
1938 if (mem_status == IN_MEMORY)
26ac0430 1939 return;
7fef2365 1940
c5426f8f
AR
1941 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1942 return; // cannot trim because we do not load them again
1943
99921d9d
AR
1944 if (preserveSwappable)
1945 mem_obj->trimSwappable();
1946 else
1947 mem_obj->trimUnSwappable();
1948
1949 debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
528b2c61 1950}
62e76326 1951
0655fa4d 1952bool
190154cf 1953StoreEntry::modifiedSince(HttpRequest * request) const
0655fa4d 1954{
1955 int object_length;
1956 time_t mod_time = lastmod;
1957
1958 if (mod_time < 0)
1959 mod_time = timestamp;
1960
bf8fe701 1961 debugs(88, 3, "modifiedSince: '" << url() << "'");
0655fa4d 1962
4a7a3d56 1963 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
0655fa4d 1964
1965 if (mod_time < 0)
1966 return true;
1967
1968 /* Find size of the object */
1969 object_length = getReply()->content_length;
1970
1971 if (object_length < 0)
b37bde1e 1972 object_length = contentLen();
0655fa4d 1973
1974 if (mod_time > request->ims) {
bf8fe701 1975 debugs(88, 3, "--> YES: entry newer than client");
0655fa4d 1976 return true;
1977 } else if (mod_time < request->ims) {
bf8fe701 1978 debugs(88, 3, "--> NO: entry older than client");
0655fa4d 1979 return false;
1980 } else if (request->imslen < 0) {
bf8fe701 1981 debugs(88, 3, "--> NO: same LMT, no client length");
0655fa4d 1982 return false;
1983 } else if (request->imslen == object_length) {
bf8fe701 1984 debugs(88, 3, "--> NO: same LMT, same length");
0655fa4d 1985 return false;
1986 } else {
bf8fe701 1987 debugs(88, 3, "--> YES: same LMT, different length");
0655fa4d 1988 return true;
1989 }
1990}
1991
46017fdd
CT
1992bool
1993StoreEntry::hasEtag(ETag &etag) const
1994{
1995 if (const HttpReply *reply = getReply()) {
1996 etag = reply->header.getETag(HDR_ETAG);
1997 if (etag.str)
1998 return true;
1999 }
2000 return false;
2001}
2002
79c8035e
AR
2003bool
2004StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
2005{
2006 const String reqETags = request.header.getList(HDR_IF_MATCH);
2007 return hasOneOfEtags(reqETags, false);
2008}
2009
2010bool
2011StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
2012{
2013 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
2014 // weak comparison is allowed only for HEAD or full-body GET requests
450fe1cb 2015 const bool allowWeakMatch = !request.flags.isRanged &&
c2a7cefd 2016 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
79c8035e
AR
2017 return hasOneOfEtags(reqETags, allowWeakMatch);
2018}
2019
2020/// whether at least one of the request ETags matches entity ETag
2021bool
2022StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
2023{
2024 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
2025 if (!repETag.str)
2026 return strListIsMember(&reqETags, "*", ',');
2027
2028 bool matched = false;
2029 const char *pos = NULL;
2030 const char *item;
2031 int ilen;
2032 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
2033 if (!strncmp(item, "*", ilen))
2034 matched = true;
2035 else {
2036 String str;
2037 str.append(item, ilen);
2038 ETag reqETag;
2039 if (etagParseInit(&reqETag, str.termedBuf())) {
2040 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
b59e6847 2041 etagIsStrongEqual(repETag, reqETag);
79c8035e
AR
2042 }
2043 }
2044 }
2045 return matched;
2046}
2047
7d3c4ca1 2048SwapDir::Pointer
c8f4eac4 2049StoreEntry::store() const
2050{
2051 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2052 return INDEXSD(swap_dirn);
2053}
2054
2055void
2056StoreEntry::unlink()
2057{
f58bb2f4
AR
2058 store()->unlink(*this); // implies disconnect()
2059 swap_filen = -1;
2060 swap_dirn = -1;
2061 swap_status = SWAPOUT_NONE;
c8f4eac4 2062}
0655fa4d 2063
aa18a4ca 2064/*
2065 * return true if the entry is in a state where
2066 * it can accept more data (i.e., via the write() method)
2067 */
2068bool
2069StoreEntry::isAccepting() const
2070{
2071 if (STORE_PENDING != store_status)
2072 return false;
2073
2074 if (EBIT_TEST(flags, ENTRY_ABORTED))
2075 return false;
2076
2077 return true;
2078}
2079
2c4cd1ad
AR
2080std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2081{
c0280457
AR
2082 os << "e:";
2083
99921d9d
AR
2084 if (e.mem_obj) {
2085 if (e.mem_obj->xitTable.index > -1)
2086 os << 't' << e.mem_obj->xitTable.index;
2087 if (e.mem_obj->memCache.index > -1)
2088 os << 'm' << e.mem_obj->memCache.index;
2089 }
c0280457 2090 if (e.swap_filen > -1 || e.swap_dirn > -1)
99921d9d 2091 os << 'd' << e.swap_filen << '@' << e.swap_dirn;
4475555f
AR
2092
2093 os << '=';
c0280457
AR
2094
2095 // print only non-default status values, using unique letters
2096 if (e.mem_status != NOT_IN_MEMORY ||
9d4e9cfb
AR
2097 e.store_status != STORE_PENDING ||
2098 e.swap_status != SWAPOUT_NONE ||
2099 e.ping_status != PING_NONE) {
c0280457
AR
2100 if (e.mem_status != NOT_IN_MEMORY) os << 'm';
2101 if (e.store_status != STORE_PENDING) os << 's';
2102 if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
2103 if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
c0280457
AR
2104 }
2105
2106 // print only set flags, using unique letters
2107 if (e.flags) {
2108 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
2109 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
99921d9d 2110 if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
c0280457
AR
2111 if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
2112 if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
c0280457
AR
2113 if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
2114 if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
2115 if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
2116 if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
2117 if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
2118 if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
2119 if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
c0280457
AR
2120 }
2121
4475555f
AR
2122 if (e.mem_obj && e.mem_obj->smpCollapsed)
2123 os << 'O';
2124
1bfe9ade 2125 return os << '/' << &e << '*' << e.locks();
2c4cd1ad
AR
2126}
2127
e6ccf245 2128/* NullStoreEntry */
2129
2130NullStoreEntry NullStoreEntry::_instance;
2131
2132NullStoreEntry *
2133NullStoreEntry::getInstance()
2134{
2135 return &_instance;
2136}
332dafa2 2137
2138char const *
2139NullStoreEntry::getMD5Text() const
2140{
2141 return "N/A";
2142}
528b2c61 2143
43ae1d95 2144void
2145NullStoreEntry::operator delete(void*)
2146{
2147 fatal ("Attempt to delete NullStoreEntry\n");
2148}
2149
528b2c61 2150char const *
2151NullStoreEntry::getSerialisedMetaData()
2152{
2153 return NULL;
2154}