]> git.ipfire.org Git - thirdparty/squid.git/blame - src/store.cc
Simplify appending SBuf to String (#2108)
[thirdparty/squid.git] / src / store.cc
CommitLineData
30a4f2a8 1/*
1f7b830e 2 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
e25c139f 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
c943f331 7 */
090089c4 8
bbc27441
AJ
9/* DEBUG: section 20 Storage Manager */
10
582c2af2 11#include "squid.h"
7e9f330d 12#include "base/AsyncCbdataCalls.h"
a4dd5bfa 13#include "base/IoManip.h"
d8ee9e8d 14#include "base/PackableStream.h"
4310f8b0 15#include "base/TextException.h"
b814e8d4 16#include "CacheDigest.h"
ec20038e 17#include "CacheManager.h"
d8ee9e8d 18#include "CollapsedForwarding.h"
ec20038e 19#include "comm/Connection.h"
7e66d5e2 20#include "comm/Read.h"
675b8408 21#include "debug/Messages.h"
d8ee9e8d
EB
22#if HAVE_DISKIO_MODULE_IPCIO
23#include "DiskIO/IpcIo/IpcIoFile.h"
24#endif
81a94152 25#include "ETag.h"
a553a5a3 26#include "event.h"
04f55905 27#include "fde.h"
af69c635 28#include "globals.h"
582c2af2 29#include "http.h"
528b2c61 30#include "HttpReply.h"
31#include "HttpRequest.h"
528b2c61 32#include "mem_node.h"
582c2af2 33#include "MemObject.h"
daed75a9 34#include "MemStore.h"
582c2af2
FC
35#include "mgr/Registration.h"
36#include "mgr/StoreIoAction.h"
e452f48d 37#include "repl_modules.h"
f206b652 38#include "RequestFlags.h"
55e1c6e8 39#include "sbuf/Stream.h"
4d5904f7 40#include "SquidConfig.h"
e4f1fdae 41#include "StatCounters.h"
582c2af2 42#include "stmem.h"
602d9612 43#include "Store.h"
b3f7fd88
AR
44#include "store/Controller.h"
45#include "store/Disk.h"
46#include "store/Disks.h"
d448e1eb 47#include "store/SwapMetaOut.h"
35a28a37 48#include "store_digest.h"
fb548aaf 49#include "store_key_md5.h"
10818c0a 50#include "store_log.h"
687f5275 51#include "store_rebuild.h"
e87137f1
FC
52#include "StoreClient.h"
53#include "StoreIOState.h"
e87137f1 54#include "StrList.h"
582c2af2 55#include "swap_log_op.h"
5bed43d6 56#include "tools.h"
9a0a18de 57#if USE_DELAY_POOLS
b67e2c8c 58#include "DelayPools.h"
59#endif
090089c4 60
ed6e9fb9
AJ
61/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
62 * XXX: convert to MEMPROXY_CLASS() API
63 */
7ae0a0c5 64#include "mem/Allocator.h"
ed6e9fb9
AJ
65#include "mem/Pool.h"
66
074d6a40 67#include <climits>
cfb88efb 68#include <stack>
06e91875 69
#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

// TODO: Convert these string constants to enum string-arrays generated

/// human-readable labels for mem_status_t values (indexed by enum value)
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

/// human-readable labels for ping_status_t values (indexed by enum value)
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

/// human-readable labels for store_status_t values (indexed by enum value)
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

/// human-readable labels for swap_status_t values (indexed by enum value)
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE",
    "SWAPOUT_FAILED"
};
9dfb6c1c 98
/*
 * This defines a removal policy (replacement) type
 */

typedef struct _storerepl_entry storerepl_entry_t;

/// registry record for one removal policy implementation
struct _storerepl_entry {
    const char *typestr;        ///< policy name used for lookup
    REMOVALPOLICYCREATE *create; ///< factory producing the policy instance
};

/// registered removal policies; grown by storeReplAdd() — TODO confirm against callers
static storerepl_entry_t *storerepl_list = nullptr;
25b6a907 111
/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
/// entries scheduled for delayed release by storeLateRelease()
static std::stack<StoreEntry*> LateReleaseStack;
/// memory pool backing StoreEntry::operator new/delete; created lazily
Mem::Allocator *StoreEntry::pool = nullptr;
e6ccf245 124
c8f4eac4 125void
126Store::Stats(StoreEntry * output)
127{
ced8def3 128 assert(output);
c8f4eac4 129 Root().stat(*output);
130}
131
d8ee9e8d
EB
/// reports the current state of Store-related queues
static void
StatQueues(StoreEntry *e)
{
    assert(e);
    PackableStream stream(*e);
    // collapsed-forwarding notification queue state
    CollapsedForwarding::StatQueue(stream);
#if HAVE_DISKIO_MODULE_IPCIO
    // disker I/O queue state (only when the IpcIo DiskIO module is built)
    stream << "\n";
    IpcIoFile::StatQueue(stream);
#endif
    stream.flush();
}
145
0b934349
AJ
// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
StoreEntry::operator new (size_t bytecount)
{
    // this allocator serves whole StoreEntry objects only
    assert(bytecount == sizeof (StoreEntry));

    // lazily create the shared pool on first allocation
    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
    }

    return pool->alloc();
}

void
StoreEntry::operator delete (void *address)
{
    // pool must exist: operator new created it before any entry existed
    pool->freeOne(address);
}
165
4310f8b0 166bool
1a210de4 167StoreEntry::makePublic(const KeyScope scope)
5ed72359 168{
169 /* This object can be cached for a long time */
4310f8b0 170 return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
5ed72359 171}
172
/// prevents further caching of this entry; see also releaseRequest()
void
StoreEntry::makePrivate(const bool shareable)
{
    releaseRequest(shareable); /* delete object when not used */
}
178
/// drops the KEY_PRIVATE marking; only legal when no release was requested
void
StoreEntry::clearPrivate()
{
    assert(!EBIT_TEST(flags, RELEASE_REQUEST));
    EBIT_CLR(flags, KEY_PRIVATE);
    shareableWhenPrivate = false;
}
186
4310f8b0 187bool
5ed72359 188StoreEntry::cacheNegatively()
189{
190 /* This object may be negatively cached */
4310f8b0
EB
191 if (makePublic()) {
192 negativeCache();
193 return true;
194 }
195 return false;
5ed72359 196}
197
e6ccf245 198size_t
3b13a8fd 199StoreEntry::inUseCount()
e6ccf245 200{
201 if (!pool)
62e76326 202 return 0;
9f9e06f3 203 return pool->getInUseCount();
e6ccf245 204}
205
332dafa2 206const char *
3b13a8fd 207StoreEntry::getMD5Text() const
332dafa2 208{
209 return storeKeyText((const cache_key *)key);
210}
211
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    // without a memory object there is nothing to limit the read size
    if (mem_obj == nullptr)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    // read-ahead policy may forbid accumulating more data for now
    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
62e76326 229
39b5a589
AR
230bool
231StoreEntry::hasParsedReplyHeader() const
232{
233 if (mem_obj) {
234 const auto &reply = mem_obj->baseReply();
235 if (reply.pstate == Http::Message::psParsed) {
236 debugs(20, 7, reply.hdr_sz);
237 return true;
238 }
239 }
240 return false;
241}
242
a46d2c0e 243bool
ced8def3 244StoreEntry::checkDeferRead(int) const
a46d2c0e 245{
246 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
247}
62e76326 248
a46d2c0e 249void
ced8def3 250StoreEntry::setNoDelay(bool const newValue)
a46d2c0e 251{
252 if (mem_obj)
253 mem_obj->setNoDelay(newValue);
528b2c61 254}
bc87dc25 255
f25d697f
AR
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// STORE_MEM_CLIENT covers all other cases, including in-memory entries,
// newly created entries, and entries not backed by disk or memory cache.
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    debugs(20, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);

    // some in-memory prefix was already trimmed; must read from disk
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    // a failed swapout leaves no usable disk copy
    if (swapoutFailed())
        return STORE_MEM_CLIENT;

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swappedOut()) {
                debugs(20,7, mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        debugs(20, 7, "STORE_OK STORE_DISK_CLIENT");
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 0)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    // TODO: The above "must make this a mem client" logic contradicts "Slight
    // weirdness" logic in store_client::doCopy() that converts hits to misses
    // on startSwapin() failures. We should probably attempt to open a swapin
    // file _here_ instead (and avoid STORE_DISK_CLIENT designation for clients
    // that fail to do so). That would also address a similar problem with Rock
    // store that does not yet support swapin during SWAPOUT_WRITING.

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    debugs(20, 7, "STORE_PENDING STORE_DISK_CLIENT");
    return STORE_DISK_CLIENT;
}
337
/// constructs an idle entry: no memory object, no cache key, not cached yet
StoreEntry::StoreEntry() :
    mem_obj(nullptr),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}
62e76326 358
6d8d05b5
DK
StoreEntry::~StoreEntry()
{
    // destruction is traced only; cleanup happens in destroyStoreEntry()
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
363
0ad2b63b
CT
#if USE_ADAPTATION
/// remembers the producer callback to resume later via kickProducer();
/// only the first deferred producer is kept -- later requests are just logged
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// schedules the previously deferred producer callback (if any) and forgets it
void
StoreEntry::kickProducer()
{
    if (deferredProducer != nullptr) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = nullptr;
    }
}
#endif
384
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, mem_obj << " in " << *this);

    // disconnect from shared caches before destroying the memory object
    if (hasTransients())
        Store::Root().transientsDisconnect(*this);
    if (hasMemStore())
        Store::Root().memoryDisconnect(*this);

    if (auto memObj = mem_obj) {
        // clear mem_obj before delete so reentrant calls see no memory object
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = nullptr;
        delete memObj;
    }
}
401
/// hash-table deleter: fully disconnects and destroys a StoreEntry
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, "destroyStoreEntry: destroying " << data);
    // data is a hash_link*; StoreEntry derives from hash_link
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != nullptr);

    // detach from the disk cache before dropping the in-memory state
    if (e->hasDisk())
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have freed and cleared the key
    assert(e->key == nullptr);

    delete e;
}
090089c4 420
090089c4 421/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
422
/// indexes this entry in the local store_table under a copy of someKey
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    assert(!key); // the entry must not be indexed already
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}
431
3900307b 432void
433StoreEntry::hashDelete()
090089c4 434{
cb868059
AR
435 if (key) { // some test cases do not create keys and do not hashInsert()
436 hash_remove_link(store_table, this);
437 storeKeyFree((const cache_key *)key);
aee3523a 438 key = nullptr;
cb868059 439 }
090089c4 440}
441
090089c4 442/* -------------------------------------------------------------------------- */
443
/// adds a reference preventing this entry from being abandoned;
/// context identifies the locker for debugging
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}
450
/// records entry use for replacement-policy purposes
void
StoreEntry::touch()
{
    lastref = squid_curtime;
}
456
void
StoreEntry::releaseRequest(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this);
    // once an entry becomes non-shareable, it stays non-shareable
    if (!shareable)
        shareableWhenPrivate = false; // may already be false
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return; // already marked; nothing more to do
    setPrivateKey(shareable, true);
}
467
/// removes a lock() reference; abandons the entry when the last lock goes
/// \returns the remaining number of locks
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // last lock released: keep in store_table or delete, as appropriate
    abandon(context);
    return 0;
}
62e76326 482
4310f8b0
EB
/// keep the unlocked StoreEntry object in the local store_table (if needed) or
/// delete it (otherwise)
void
StoreEntry::doAbandon(const char *context)
{
    debugs(20, 5, *this << " via " << (context ? context : "somebody"));
    assert(!locked());
    assert(storePendingNClients(this) == 0);

    // Both aborted local writers and aborted local readers (of remote writers)
    // are STORE_PENDING, but aborted readers should never release().
    if (EBIT_TEST(flags, RELEASE_REQUEST) ||
            (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
        this->release();
        return;
    }

    Store::Root().handleIdleEntry(*this); // may delete us
}
502
08e5d64f 503StoreEntry *
60745f24 504storeGetPublic(const char *uri, const HttpRequestMethod& method)
08e5d64f 505{
4310f8b0 506 return Store::Root().find(storeKeyPublic(uri, method));
08e5d64f 507}
508
f66a9ef4 509StoreEntry *
1a210de4 510storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
f66a9ef4 511{
4310f8b0 512 return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
f66a9ef4 513}
514
515StoreEntry *
1a210de4 516storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
f66a9ef4 517{
1a210de4 518 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);
62e76326 519
aee3523a 520 if (e == nullptr && req->method == Http::METHOD_HEAD)
62e76326 521 /* We can generate a HEAD reply from a cached GET object */
1a210de4 522 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);
62e76326 523
f66a9ef4 524 return e;
525}
526
/// the next value of a monotonically increasing positive counter used to
/// make private cache keys unique; restarts at 1 after signed wrap-around
static int
getKeyCounter(void)
{
    static int counter = 0;

    ++counter;
    if (counter < 0) // overflow wrapped to negative; restart the sequence
        counter = 1;

    return counter;
}
537
c8f4eac4 538/* RBC 20050104 AFAICT this should become simpler:
539 * rather than reinserting with a special key it should be marked
540 * as 'released' and then cleaned up when refcounting indicates.
541 * the StoreHashIndex could well implement its 'released' in the
542 * current manner.
543 * Also, clean log writing should skip over ia,t
544 * Otherwise, we need a 'remove from the index but not the store
545 * concept'.
546 */
void
StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
    debugs(20, 3, shareable << permanent << ' ' << *this);
    if (permanent)
        EBIT_SET(flags, RELEASE_REQUEST); // may already be set
    if (!shareable)
        shareableWhenPrivate = false; // may already be false

    if (EBIT_TEST(flags, KEY_PRIVATE))
        return; // already keyed privately; flag updates above still applied

    if (key) {
        Store::Root().evictCached(*this); // all caches/workers will know
        hashDelete();
    }

    // give the memory object a fresh id so its private key is unique
    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == nullptr);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}
573
/// makes the entry publicly reachable under the given key scope
/// \returns whether the entry is now public
bool
StoreEntry::setPublicKey(const KeyScope scope)
{
    debugs(20, 3, *this);
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return true; // already public

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    try {
        // the guard unlocks the new Vary marker if any step below throws
        EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
        const cache_key *pubKey = calcPublicKey(scope);
        Store::Root().addWriting(this, pubKey);
        forcePublicKey(pubKey);
        newVaryMarker.unlockAndReset("setPublicKey+success");
        return true;
    } catch (const std::exception &ex) {
        debugs(20, 2, "for " << *this << " failed: " << ex.what());
    }
    return false;
}
62e76326 607
1a210de4
EB
608void
609StoreEntry::clearPublicKeyScope()
610{
611 if (!key || EBIT_TEST(flags, KEY_PRIVATE))
612 return; // probably the old public key was deleted or made private
b8a899c0 613
1a210de4 614 // TODO: adjustVary() when collapsed revalidation supports that
eab8dcfa 615
1a210de4
EB
616 const cache_key *newKey = calcPublicKey(ksDefault);
617 if (!storeKeyHashCmp(key, newKey))
618 return; // probably another collapsed revalidation beat us to this change
62e76326 619
1a210de4
EB
620 forcePublicKey(newKey);
621}
62e76326 622
1a210de4
EB
/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    debugs(20, 3, storeKeyText(newkey) << " for " << *this);
    assert(mem_obj);

    // at most one entry may be indexed under a given key
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "releasing clashing " << *e2);
        e2->release(true);
    }

    if (key)
        hashDelete();

    clearPrivate();

    assert(mem_obj->hasUris());
    hashInsert(newkey);

    // disk-backed entries must (re)appear in the swap log under the new key
    if (hasDisk())
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
648
1a210de4
EB
649/// Calculates correct public key for feeding forcePublicKey().
650/// Assumes adjustVary() has been called for this entry already.
651const cache_key *
652StoreEntry::calcPublicKey(const KeyScope keyScope)
653{
654 assert(mem_obj);
c43405e7 655 return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
1a210de4
EB
656 storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
657}
658
/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
/// \throws std::exception on failures
StoreEntry *
StoreEntry::adjustVary()
{
    assert(mem_obj);

    // without a request there is nothing to synchronize Vary state with
    if (!mem_obj->request)
        return nullptr;

    HttpRequestPointer request(mem_obj->request);
    const auto &reply = mem_obj->freshestReply();

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release(true);
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), &reply);
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        // XXX: storeCreateEntry() already tries to make `pe` public under
        // certain conditions. If those conditions do not apply to Vary markers,
        // then refactor to call storeCreatePureEntry() above. Otherwise,
        // refactor to simply check whether `pe` is already public below.
        if (!pe->makePublic()) {
            pe->unlock("StoreEntry::adjustVary+failed_makePublic");
            throw TexcHere("failed to make Vary marker public");
        }
        /* We are allowed to do this typecast */
        const HttpReplyPointer rep(new HttpReply);
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        auto vary = reply.header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = reply.header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until timestampsSet()

        pe->timestampsSet();

        pe->startWriting(); // after timestampsSet()

        pe->completeSuccessfully("wrote the entire Vary marker object");

        return pe;
    }
    return nullptr;
}
739
b8d8561b 740StoreEntry *
4310f8b0 741storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
090089c4 742{
aee3523a 743 StoreEntry *e = nullptr;
bf8fe701 744 debugs(20, 3, "storeCreateEntry: '" << url << "'");
090089c4 745
c877c0bc 746 e = new StoreEntry();
76d61119 747 e->createMemObject(url, log_url, method);
62e76326 748
234967c9 749 e->store_status = STORE_PENDING;
090089c4 750 e->refcount = 0;
b8de7ebe 751 e->lastref = squid_curtime;
3900307b 752 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
30a4f2a8 753 e->ping_status = PING_NONE;
d46a87a8 754 EBIT_SET(e->flags, ENTRY_VALIDATED);
090089c4 755 return e;
756}
757
1bfe9ade
AR
/// creates a new, locked StoreEntry, keyed publicly when possible and
/// privately otherwise
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    // peering without private keys requires an immediate public key
    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    e->setPrivateKey(false, !flags.cachable);
    return e;
}
770
/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
778
/// stores a fragment of the response body/headers and wakes up waiting clients
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != nullptr);
    /* This assert will change when we teach the store to update */
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    writeBuffer.offset += mem_obj->baseReply().hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    // stop delaying clients once read-ahead buffering has gone far enough
    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
        debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
        EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
    }

    invokeHandlers();
}
800
c21ad0f5 801/* Append incoming data from a primary server to an entry. */
802void
803StoreEntry::append(char const *buf, int len)
804{
aee3523a 805 assert(mem_obj != nullptr);
3a1c3e2f 806 assert(len >= 0);
c21ad0f5 807 assert(store_status == STORE_PENDING);
528b2c61 808
809 StoreIOBuffer tempBuffer;
810 tempBuffer.data = (char *)buf;
811 tempBuffer.length = len;
aa18a4ca 812 /*
813 * XXX sigh, offset might be < 0 here, but it gets "corrected"
814 * later. This offset crap is such a mess.
815 */
66d51f4f 816 tempBuffer.offset = mem_obj->endOffset() - mem_obj->baseReply().hdr_sz;
c21ad0f5 817 write(tempBuffer);
090089c4 818}
819
0f33a01d
AJ
820void
821StoreEntry::vappendf(const char *fmt, va_list vargs)
822{
823 LOCAL_ARRAY(char, buf, 4096);
824 *buf = 0;
825 int x;
826
9a03c44b 827 va_list ap;
bd746807
AJ
828 /* Fix of bug 753r. The value of vargs is undefined
829 * after vsnprintf() returns. Make a copy of vargs
2f8abb64 830 * in case we loop around and call vsnprintf() again.
bd746807 831 */
9a03c44b 832 va_copy(ap,vargs);
bd746807
AJ
833 errno = 0;
834 if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
1fab8344 835 fatal(xstrerr(errno));
bd746807
AJ
836 return;
837 }
838 va_end(ap);
0f33a01d 839
1fab8344 840 if (x < static_cast<int>(sizeof(buf))) {
0f33a01d
AJ
841 append(buf, x);
842 return;
843 }
844
845 // okay, do it the slow way.
846 char *buf2 = new char[x+1];
847 int y = vsnprintf(buf2, x+1, fmt, vargs);
848 assert(y >= 0 && y == x);
849 append(buf2, y);
850 delete[] buf2;
851}
852
// deprecated. use StoreEntry::appendf() instead.
/// appends printf-formatted text to the given entry
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}
862
// deprecated. use StoreEntry::appendf() instead.
/// va_list flavor of storeAppendPrintf()
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
869
/// tallies of StoreEntry::checkCachable() outcomes, reported by
/// storeCheckCachableStats() via the cache manager
struct _store_check_cachable_hist {

    /// reasons for refusing to cache an entry
    struct {
        int not_entry_cachable;
        int wrong_content_length;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    /// cachable outcomes
    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
8423ff74 887
c47511fd 888int
889storeTooManyDiskFilesOpen(void)
890{
891 if (Config.max_open_disk_fds == 0)
62e76326 892 return 0;
893
83a29c95 894 if (store_open_disk_fd > Config.max_open_disk_fds)
62e76326 895 return 1;
896
c47511fd 897 return 0;
898}
899
/// whether the entry is below the configured minimum cachable object size
int
StoreEntry::checkTooSmall()
{
    // special entries are exempt from size limits
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    // for completed entries, judge by the actual stored size (when known)
    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;

    // otherwise, fall back on the advertised Content-Length (when known)
    const auto clen = mem().baseReply().content_length;
    if (clen >= 0 && clen < Config.Store.minObjectSize)
        return 1;
    return 0;
}
916
2be042f6
AR
917bool
918StoreEntry::checkTooBig() const
919{
920 if (mem_obj->endOffset() > store_maxobjsize)
921 return true;
922
66d51f4f
AR
923 const auto clen = mem_obj->baseReply().content_length;
924 return (clen >= 0 && clen > store_maxobjsize);
2be042f6
AR
925}
926
// TODO: move "too many open..." checks outside -- we are called too early/late
/// whether this entry may be cached; marks uncachable entries for release
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

    if (EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
        ++store_check_cachable_hist.no.wrong_content_length;
    } else if (!mem_obj) {
        // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
        // this segfault protection, but how can we get such a HIT?
        debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
        ++store_check_cachable_hist.no.missing_parts;
    } else if (checkTooBig()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
        ++store_check_cachable_hist.no.too_big;
    } else if (checkTooSmall()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
        ++store_check_cachable_hist.no.too_small;
    } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
        debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
        ++store_check_cachable_hist.no.private_key;
    } else if (hasDisk()) {
        /*
         * the remaining cases are only relevant if we haven't
         * started swapping out the object yet.
         */
        return 1;
    } else if (storeTooManyDiskFilesOpen()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
        ++store_check_cachable_hist.no.too_many_open_files;
    } else if (fdNFree() < RESERVED_FD) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
        ++store_check_cachable_hist.no.too_many_open_fds;
    } else {
        ++store_check_cachable_hist.yes.Default;
        return 1;
    }

    // every "NO" branch above falls through to here
    releaseRequest();
    return 0;
}
981
/// cache manager report: dumps store_check_cachable_hist counters
/// (one "reason" row per line) into the given entry
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");
    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      0); // TODO: Remove this backward compatibility hack.
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1007
/// marks the entry as having a bad/mismatched length and
/// requests its release; `reason` is for debugging only
void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}
1015
/// finish storing the entry, asserting (via the caller-supplied reason)
/// that the received content is complete
void
StoreEntry::completeSuccessfully(const char * const whyWeAreSure)
{
    debugs(20, 3, whyWeAreSure << "; " << *this);
    complete();
}
1022
/// finish storing the entry while acknowledging that the content was
/// truncated; marks the length as bad before completing
void
StoreEntry::completeTruncated(const char * const truncationReason)
{
    lengthWentBad(truncationReason);
    complete();
}
1029
/// transition the entry from STORE_PENDING to STORE_OK and notify clients
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    // To preserve forwarding retries, call FwdState::complete() instead.
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    // freeze the object size at the number of bytes accumulated so far
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    // detect length mismatches now that the full body has been received
    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut.  However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1070
/*
 * Someone wants to abort this transfer.  Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != nullptr);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort");  /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    // allow the Store clients to be told about the problem
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    if (mem_obj->abortCallback) {
        ScheduleCallHere(mem_obj->abortCallback);
        mem_obj->abortCallback = nullptr;
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort");    /* unlock */
}
1116
/**
 * Clear Memory storage to accommodate the given object len
 */
void
storeGetMemSpace(int size)
{
    // delegate to the store hierarchy root
    Store::Root().freeMemorySpace(size);
}
1125
/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    // re-arms itself: one-shot event scheduled every second
    eventAdd("MaintainSwapSpace", Maintain, nullptr, 1.0, 1);

}
1140
090089c4 1141/* The maximum objects to scan for maintain storage space */
c21ad0f5 1142#define MAINTAIN_MAX_SCAN 1024
1143#define MAINTAIN_MAX_REMOVE 64
090089c4 1144
/// remove the entry from all caches and destroy it; if the entry is locked
/// or a disk rebuild is in progress, only schedule the removal
void
StoreEntry::release(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        releaseRequest(shareable);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        releaseRequest(shareable);
        // storeLateRelease() will unlock and free these after the rebuild
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    Store::Root().evictCached(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
}
1171
e42d5181 1172static void
ced8def3 1173storeLateRelease(void *)
e42d5181 1174{
1175 StoreEntry *e;
e42d5181 1176 static int n = 0;
62e76326 1177
2745fea5 1178 if (Store::Controller::store_dirs_rebuilding) {
aee3523a 1179 eventAdd("storeLateRelease", storeLateRelease, nullptr, 1.0, 1);
62e76326 1180 return;
e42d5181 1181 }
62e76326 1182
cfb88efb
AR
1183 // TODO: this works but looks unelegant.
1184 for (int i = 0; i < 10; ++i) {
1185 if (LateReleaseStack.empty()) {
c59baaa8 1186 debugs(20, Important(30), "storeLateRelease: released " << n << " objects");
24b14da9 1187 return;
cfb88efb
AR
1188 } else {
1189 e = LateReleaseStack.top();
1190 LateReleaseStack.pop();
3aa53107 1191 }
62e76326 1192
1bfe9ade 1193 e->unlock("storeLateRelease");
5db6bf73 1194 ++n;
e42d5181 1195 }
62e76326 1196
aee3523a 1197 eventAdd("storeLateRelease", storeLateRelease, nullptr, 0.0, 1);
e42d5181 1198}
1199
66d51f4f
AR
1200/// whether the base response has all the body bytes we expect
1201/// \returns true for responses with unknown/unspecified body length
1202/// \returns true for responses with the right number of accumulated body bytes
528b2c61 1203bool
1204StoreEntry::validLength() const
6602e70e 1205{
47f6e231 1206 int64_t diff;
aee3523a 1207 assert(mem_obj != nullptr);
66d51f4f 1208 const auto reply = &mem_obj->baseReply();
bf8fe701 1209 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
e4049756 1210 debugs(20, 5, "storeEntryValidLength: object_len = " <<
707fdc47 1211 objectLen());
bf8fe701 1212 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1213 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
62e76326 1214
d8b249ef 1215 if (reply->content_length < 0) {
bf8fe701 1216 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
62e76326 1217 return 1;
ffe4a367 1218 }
62e76326 1219
07304bf9 1220 if (reply->hdr_sz == 0) {
bf8fe701 1221 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
62e76326 1222 return 1;
ffe4a367 1223 }
62e76326 1224
c2a7cefd 1225 if (mem_obj->method == Http::METHOD_HEAD) {
bf8fe701 1226 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
62e76326 1227 return 1;
ffe4a367 1228 }
62e76326 1229
9b769c67 1230 if (reply->sline.status() == Http::scNotModified)
62e76326 1231 return 1;
1232
9b769c67 1233 if (reply->sline.status() == Http::scNoContent)
62e76326 1234 return 1;
1235
707fdc47 1236 diff = reply->hdr_sz + reply->content_length - objectLen();
62e76326 1237
ebf4efff 1238 if (diff == 0)
62e76326 1239 return 1;
1240
bf8fe701 1241 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
62e76326 1242
ebf4efff 1243 return 0;
ffe4a367 1244}
6602e70e 1245
/// registers the store-related cache manager report pages
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
    Mgr::RegisterAction("store_queues", "SMP Transients and Caching Queues", StatQueues, 0, 1);
}
1255
/// one-time Store subsystem initialization: replacement policy, digests,
/// logging, the store hierarchy, index rebuild, and manager pages
void
storeInit(void)
{
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, nullptr, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1268
/// (re)applies squid.conf-derived settings to the store hierarchy
void
storeConfigure(void)
{
    Store::Root().configure();
}
1274
9487bae9 1275bool
97754f5a 1276StoreEntry::memoryCachable()
56f29785 1277{
97754f5a
AR
1278 if (!checkCachable())
1279 return 0;
1280
23b79630
AR
1281 if (shutting_down)
1282 return 0; // avoid heavy optional work during shutdown
1283
aee3523a 1284 if (mem_obj == nullptr)
62e76326 1285 return 0;
1286
3900307b 1287 if (mem_obj->data_hdr.size() == 0)
62e76326 1288 return 0;
1289
19fdd3f3 1290 if (mem_obj->inmem_lo != 0)
e1381638 1291 return 0;
19fdd3f3 1292
4310f8b0 1293 if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
e1381638 1294 return 0;
19fdd3f3
HN
1295
1296 return 1;
56f29785 1297}
1298
/// whether this entry is a still-fresh negatively-cached response
/// (ENTRY_NEGCACHED, unexpired, and fully stored)
int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}
1313
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    // only mark as negatively cached if the entry is not already stale
    if (expires > squid_curtime) {
        EBIT_SET(flags, ENTRY_NEGCACHED);
        debugs(20, 6, "expires = " << expires << " +" << (expires-squid_curtime) << ' ' << *this);
    }
}
0a21bd84 1336
a7e59001 1337int
1338expiresMoreThan(time_t expires, time_t when)
1339{
c21ad0f5 1340 if (expires < 0) /* No Expires given */
62e76326 1341 return 1;
1342
48f44632 1343 return (expires > (squid_curtime + when));
a7e59001 1344}
fe54d06d 1345
/// whether this entry can still satisfy client requests:
/// not released/aborted/stale and backed by some cache (or being cached)
int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // negatively cached entries are only valid until they expire
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (hasDisk()) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
62663274 1385
/// recomputes the entry timestamp, expiry, and last-modified values from the
/// freshest reply headers; \returns false if nothing effectively changed
bool
StoreEntry::timestampsSet()
{
    debugs(20, 7, *this << " had " << describeTimestamps());

    // TODO: Remove change-reducing "&" before the official commit.
    const auto reply = &mem().freshestReply();

    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime))
            served_date -= responseTime.tv_sec;
    }

    // derive the absolute expiry time from the (relative) reply headers
    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    debugs(20, 5, *this << " has " << describeTimestamps());
    return true;
}
1450
/// applies HTTP 304 (Not Modified) reply headers from e304 to this cached
/// entry; \returns false if neither headers nor timestamps changed
/// \throws TextException if the updated header would exceed reply_header_max_size
bool
StoreEntry::updateOnNotModified(const StoreEntry &e304)
{
    assert(mem_obj);
    assert(e304.mem_obj);

    // update reply before calling timestampsSet() below
    const auto &oldReply = mem_obj->freshestReply();
    const auto updatedReply = oldReply.recreateOnNotModified(e304.mem_obj->baseReply());
    if (updatedReply) { // HTTP 304 brought in new information
        if (updatedReply->prefixLen() > Config.maxReplyHeaderSize) {
            throw TextException(ToSBuf("cannot update the cached response because its updated ",
                                       updatedReply->prefixLen(), "-byte header would exceed ",
                                       Config.maxReplyHeaderSize, "-byte reply_header_max_size"), Here());
        }
        mem_obj->updateReply(*updatedReply);
    }
    // else continue to use the previous update, if any

    if (!timestampsSet() && !updatedReply)
        return false;

    // Keep the old mem_obj->vary_headers; see HttpHeader::skipUpdateHeader().

    debugs(20, 5, "updated basics in " << *this << " with " << e304);
    mem_obj->appliedUpdates = true; // helps in triage; may already be true
    return true;
}
429fdbec 1479
/// registers the (single) async call to schedule if this entry is aborted;
/// only one abort callback may be registered at a time
void
StoreEntry::registerAbortCallback(const AsyncCall::Pointer &handler)
{
    assert(mem_obj);
    assert(!mem_obj->abortCallback);
    mem_obj->abortCallback = handler;
}
1487
/// cancels and forgets the registered abort callback (if any);
/// `reason` is recorded with the cancellation for debugging
void
StoreEntry::unregisterAbortCallback(const char *reason)
{
    assert(mem_obj);
    if (mem_obj->abortCallback) {
        mem_obj->abortCallback->cancel(reason);
        mem_obj->abortCallback = nullptr;
    }
}
88738790 1497
/// dumps all entry fields to the debug log at the given debug level `l`
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1519
/*
 * NOTE, this function assumes only two mem states
 */
/// updates mem_status and keeps the local memory replacement policy and the
/// hot object counter in sync (shared memory cache needs neither)
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (MemStore::Enabled()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != nullptr);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // special entries (e.g., icons) are never evicted, so keep them
        // out of the replacement policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
6e86c3e8 1564
9fb13bb6 1565const char *
3900307b 1566StoreEntry::url() const
9fb13bb6 1567{
aee3523a 1568 if (mem_obj == nullptr)
62e76326 1569 return "[null_mem_obj]";
9fb13bb6 1570 else
c877c0bc 1571 return mem_obj->storeId();
9fb13bb6 1572}
24ffafb4 1573
/// creates this entry's MemObject; the entry must not have one already
void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}
9487bae9 1580
/// creates this entry's MemObject and sets its URIs/method;
/// the entry must not have a MemObject already
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}
1587
/// lazily creates the MemObject (if needed) and (re)sets its URIs/method
void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1595
/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1605
/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and Invokes the handlers
 * to begin sending anything that may be buffered.
 */
void
StoreEntry::flush()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        EBIT_CLR(flags, DELAY_SENDING);
        invokeHandlers();
    }
}
07304bf9 1619
/// discards accumulated reply data and resets all entry timestamps
void
StoreEntry::reset()
{
    debugs(20, 3, url());
    mem().reset();
    // -1 means "unknown" for all three timestamps
    expires = lastModified_ = timestamp = -1;
}
2b906e48 1627
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}
1640
/*
 * called to add another store removal policy module
 */
/// appends a {type, factory} pair to the NULL-terminated storerepl_list;
/// duplicate registrations are ignored with a warning
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    // grow by one slot plus the zeroed terminator entry
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
1666
/*
 * Create a removal policy instance
 */
/// instantiates the removal policy named in `settings`; a fatal error is
/// raised when no registered policy matches
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return nullptr; /* NOTREACHED */
}
1686
/// stores the given (complete) error reply into this entry and negatively
/// caches the result; consumes the reply
void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(HttpReplyPointer(reply));
    flush();
    completeSuccessfully("replaceHttpReply() stored the entire error");
    negativeCache();
    releaseRequest(false); // if it is safe to negatively cache, sharing is OK
    unlock("StoreEntry::storeErrorResponse");
}
1699
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(const HttpReplyPointer &rep, const bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    // cannot store a reply without a MemObject to hold it
    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceBaseReply(rep);

    if (andStartWriting)
        startWriting();
}
1719
/// packs the (base) reply headers and body prefix into this (empty) entry,
/// ending same-worker collapsing risks in the process
void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    assert (isEmpty());
    assert(mem_obj);

    // Per MemObject replies definitions, we can only write our base reply.
    // Currently, all callers replaceHttpReply() first, so there is no updated
    // reply here anyway. Eventually, we may need to support the
    // updateOnNotModified(),startWriting() sequence as well.
    assert(!mem_obj->updatedReply());
    const auto rep = &mem_obj->baseReply();

    buffer();
    rep->packHeadersUsingSlowPacker(*this);
    mem_obj->markEndOfReplyHeaders();

    // Same-worker collapsing risks end with the receipt of the headers.
    // SMP collapsing risks remain until the headers are actually cached, but
    // that event is announced via CF-agnostic Store writing broadcasts.
    setCollapsingRequirement(false);

    rep->body.packInto(this);
    flush();
}
62e76326 1748
/// serializes this entry's swap metadata into a newly allocated buffer;
/// sets `length` to the buffer size; caller owns (and must free) the result
char const *
StoreEntry::getSerialisedMetaData(size_t &length) const
{
    return static_cast<const char *>(Store::PackSwapMeta(*this, length).release());
}
1754
/**
 * If needed, signal transient entry readers that no more cache changes are
 * expected and, hence, they should switch to Plan B instead of getting stuck
 * waiting for us to start or finish storing the entry.
 */
void
StoreEntry::storeWritingCheckpoint()
{
    if (!hasTransients())
        return; // no SMP complications

    // writers become readers but only after completeWriting() which we trigger
    if (Store::Root().transientsReader(*this))
        return; // readers do not need to inform

    assert(mem_obj);
    // still busy writing to the shared memory cache?
    if (mem_obj->memCache.io != Store::ioDone) {
        debugs(20, 7, "not done with mem-caching " << *this);
        return;
    }

    const auto doneWithDiskCache =
        // will not start
        (mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) ||
        // or has started but finished already
        (mem_obj->swapout.decision == MemObject::SwapOut::swStarted && !swappingOut());
    if (!doneWithDiskCache) {
        debugs(20, 7, "not done with disk-caching " << *this);
        return;
    }

    debugs(20, 7, "done with writing " << *this);
    Store::Root().noteStoppedSharedWriting(*this);
}
1789
/// records the decision (not) to cache this entry in memory;
/// a negative decision may end shared writing (see storeWritingCheckpoint())
void
StoreEntry::memOutDecision(const bool willCacheInRam)
{
    if (!willCacheInRam)
        return storeWritingCheckpoint();
    assert(mem_obj->memCache.io != Store::ioDone);
    // and wait for storeWriterDone()
}
1798
/// records the disk swap-out decision and re-evaluates whether
/// shared writing has ended (see storeWritingCheckpoint())
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    storeWritingCheckpoint();
}
1806
/// called when a store writer (memory or disk) finishes;
/// re-evaluates whether shared writing has ended
void
StoreEntry::storeWriterDone()
{
    storeWritingCheckpoint();
}
1812
/// frees in-memory body prefix no longer needed by readers; when
/// `preserveSwappable`, keeps bytes still needed for disk swap-out
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
62e76326 1834
0655fa4d 1835bool
438b41ba 1836StoreEntry::modifiedSince(const time_t ims, const int imslen) const
0655fa4d 1837{
438b41ba 1838 const time_t mod_time = lastModified();
0655fa4d 1839
bf8fe701 1840 debugs(88, 3, "modifiedSince: '" << url() << "'");
0655fa4d 1841
4a7a3d56 1842 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
0655fa4d 1843
1844 if (mod_time < 0)
1845 return true;
1846
66d51f4f 1847 assert(imslen < 0); // TODO: Either remove imslen or support it properly.
0655fa4d 1848
438b41ba 1849 if (mod_time > ims) {
bf8fe701 1850 debugs(88, 3, "--> YES: entry newer than client");
0655fa4d 1851 return true;
438b41ba 1852 } else if (mod_time < ims) {
bf8fe701 1853 debugs(88, 3, "--> NO: entry older than client");
0655fa4d 1854 return false;
0655fa4d 1855 } else {
66d51f4f
AR
1856 debugs(88, 3, "--> NO: same LMT");
1857 return false;
0655fa4d 1858 }
1859}
1860
46017fdd
CT
1861bool
1862StoreEntry::hasEtag(ETag &etag) const
1863{
66d51f4f 1864 if (const auto reply = hasFreshestReply()) {
789217a2 1865 etag = reply->header.getETag(Http::HdrType::ETAG);
46017fdd
CT
1866 if (etag.str)
1867 return true;
1868 }
1869 return false;
1870}
1871
79c8035e
AR
1872bool
1873StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1874{
789217a2 1875 const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
79c8035e
AR
1876 return hasOneOfEtags(reqETags, false);
1877}
1878
1879bool
1880StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1881{
789217a2 1882 const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
79c8035e 1883 // weak comparison is allowed only for HEAD or full-body GET requests
450fe1cb 1884 const bool allowWeakMatch = !request.flags.isRanged &&
c2a7cefd 1885 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
79c8035e
AR
1886 return hasOneOfEtags(reqETags, allowWeakMatch);
1887}
1888
1889/// whether at least one of the request ETags matches entity ETag
1890bool
1891StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1892{
66d51f4f 1893 const auto repETag = mem().freshestReply().header.getETag(Http::HdrType::ETAG);
d5f18517
AJ
1894 if (!repETag.str) {
1895 static SBuf asterisk("*", 1);
1896 return strListIsMember(&reqETags, asterisk, ',');
1897 }
79c8035e
AR
1898
1899 bool matched = false;
aee3523a 1900 const char *pos = nullptr;
79c8035e
AR
1901 const char *item;
1902 int ilen;
1903 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1904 if (!strncmp(item, "*", ilen))
1905 matched = true;
1906 else {
1907 String str;
1908 str.append(item, ilen);
1909 ETag reqETag;
1910 if (etagParseInit(&reqETag, str.termedBuf())) {
1911 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
b59e6847 1912 etagIsStrongEqual(repETag, reqETag);
79c8035e
AR
1913 }
1914 }
1915 }
1916 return matched;
1917}
1918
2745fea5
AR
1919Store::Disk &
1920StoreEntry::disk() const
c8f4eac4 1921{
4310f8b0 1922 assert(hasDisk());
2745fea5
AR
1923 const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
1924 assert(sd);
1925 return *sd;
c8f4eac4 1926}
0655fa4d 1927
4310f8b0
EB
1928bool
1929StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
1930{
1931 checkDisk();
1932 if (dirn < 0 && filen < 0)
1933 return swap_dirn >= 0;
1934 Must(dirn >= 0);
1935 const bool matchingDisk = (swap_dirn == dirn);
1936 return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
1937}
1938
/// Associates the entry with the given cache_dir slot and swap state,
/// verifying disk-related invariants before and after the update.
void
StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
{
    debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
           swapStatusStr[status] << " " << dirn << " " <<
           asHex(fno).upperCase().minDigits(8));
    checkDisk(); // the entry must be in a consistent state before attaching
    swap_dirn = dirn;
    swap_filen = fno;
    swap_status = status;
    checkDisk(); // ... and the new dirn/filen/status combo must be consistent
}
1951
/// Makes the entry diskless: forgets its cache_dir, swap file, and swap state.
void
StoreEntry::detachFromDisk()
{
    swap_dirn = -1;
    swap_filen = -1;
    swap_status = SWAPOUT_NONE;
}
1959
/// Asserts that the disk-related fields (swap_dirn, swap_filen, swap_status)
/// form a coherent combination; logs and rethrows on inconsistency.
void
StoreEntry::checkDisk() const
{
    try {
        if (swap_dirn < 0) {
            // a detached entry must have no swap file and no swap status
            Must(swap_filen < 0);
            Must(swap_status == SWAPOUT_NONE);
        } else {
            // an attached entry must point to a configured cache_dir slot
            Must(swap_filen >= 0);
            Must(static_cast<size_t>(swap_dirn) < Config.cacheSwap.n_configured);
            if (swapoutFailed()) {
                // failed swapouts must have been marked for release
                Must(EBIT_TEST(flags, RELEASE_REQUEST));
            } else {
                Must(swappingOut() || swappedOut());
            }
        }
    } catch (...) {
        debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " <<
               *this << "; problem: " << CurrentException);
        throw; // the caller decides how to handle the inconsistency
    }
}
1982
aa18a4ca 1983/*
1984 * return true if the entry is in a state where
1985 * it can accept more data (ie with write() method)
1986 */
1987bool
1988StoreEntry::isAccepting() const
1989{
1990 if (STORE_PENDING != store_status)
1991 return false;
1992
1993 if (EBIT_TEST(flags, ENTRY_ABORTED))
1994 return false;
1995
1996 return true;
1997}
1998
/// Renders entry timestamps for debugging: LV (timestamp), LU (last
/// reference), LM (last modification), EX (expiry).
/// \returns a pointer to a static buffer overwritten by each call
const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    // NOTE(review): the int casts truncate wide time_t values (e.g. past
    // year 2038) -- matches long-standing formatting, but worth confirming
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}
2010
d2a6dcba
EB
2011void
2012StoreEntry::setCollapsingRequirement(const bool required)
819be284 2013{
9358e99f
AR
2014 if (hittingRequiresCollapsing() == required)
2015 return; // no change
2016
2017 debugs(20, 5, (required ? "adding to " : "removing from ") << *this);
d2a6dcba
EB
2018 if (required)
2019 EBIT_SET(flags, ENTRY_REQUIRES_COLLAPSING);
2020 else
2021 EBIT_CLR(flags, ENTRY_REQUIRES_COLLAPSING);
819be284
EB
2022}
2023
4310f8b0
EB
2024static std::ostream &
2025operator <<(std::ostream &os, const Store::IoStatus &io)
2026{
2027 switch (io) {
2028 case Store::ioUndecided:
2029 os << 'u';
2030 break;
2031 case Store::ioReading:
2032 os << 'r';
2033 break;
2034 case Store::ioWriting:
2035 os << 'w';
2036 break;
2037 case Store::ioDone:
2038 os << 'o';
2039 break;
2040 }
2041 return os;
2042}
2043
/// Summarizes a StoreEntry for debugging: store locations (transients,
/// memory cache, disk), non-default status values, set flags, address, and
/// lock count. Each piece uses a unique letter code.
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    // transients table location (if any)
    if (e.hasTransients()) {
        const auto &xitTable = e.mem_obj->xitTable;
        os << 't' << xitTable.io << xitTable.index;
    }

    // shared memory cache location (if any)
    if (e.hasMemStore()) {
        const auto &memCache = e.mem_obj->memCache;
        os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
    }

    // Do not use e.hasDisk() here because its checkDisk() call may call us.
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
            os << 'I';
            if (e.shareableWhenPrivate)
                os << 'H'; // private but still shareable among collapsed hits
        }
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
        if (EBIT_TEST(e.flags, ENTRY_REQUIRES_COLLAPSING)) os << 'C';
    }

    // entry address and current lock count
    return os << '/' << &e << '*' << e.locks();
}
2098
/// Exception-path cleanup: marks the guarded entry for release and drops our
/// lock, swallowing any secondary exceptions (this method must not throw).
void
Store::EntryGuard::onException() noexcept
{
    SWALLOW_EXCEPTIONS({
        entry_->releaseRequest(false);
        entry_->unlock(context_);
    });
}
2107