/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "base/AsyncCbdataCalls.h"
#include "base/PackableStream.h"
#include "base/TextException.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "CollapsedForwarding.h"
#include "comm/Connection.h"
#include "comm/Read.h"
#if HAVE_DISKIO_MODULE_IPCIO
#include "DiskIO/IpcIo/IpcIoFile.h"
#endif
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store/Controller.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
 * XXX: convert to MEMPROXY_CLASS() API
 */
#include "mem/Pool.h"

#include <climits>
#include <stack>

#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

// TODO: Convert these string constants to generated enum string-arrays.

const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE",
    "SWAPOUT_FAILED"
};

/*
 * This defines a repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

/// reports the current state of Store-related queues
static void
StatQueues(StoreEntry *e)
{
    assert(e);
    PackableStream stream(*e);
    CollapsedForwarding::StatQueue(stream);
#if HAVE_DISKIO_MODULE_IPCIO
    stream << "\n";
    IpcIoFile::StatQueue(stream);
#endif
    stream.flush();
}

// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
StoreEntry::operator new (size_t bytecount)
{
    assert(bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate("StoreEntry", bytecount);
    }

    return pool->alloc();
}

void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}

bool
StoreEntry::makePublic(const KeyScope scope)
{
    /* This object can be cached for a long time */
    return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
}

void
StoreEntry::makePrivate(const bool shareable)
{
    releaseRequest(shareable); /* delete object when not used */
}

void
StoreEntry::clearPrivate()
{
    assert(!EBIT_TEST(flags, RELEASE_REQUEST));
    EBIT_CLR(flags, KEY_PRIVATE);
    shareableWhenPrivate = false;
}

bool
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    if (makePublic()) {
        negativeCache();
        return true;
    }
    return false;
}

size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}

#include "comm.h"

void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}

void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead <= 0) {
        assert(mem_obj);
        mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;
    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, "will not read from closing " << conn << " for " << callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
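
/* A sketch of the deferral round-trip above, assuming MemObject::delayRead()
 * queues the DeferredRead until the read-ahead policy wants more bytes and
 * then invokes DeferReader, which simply re-enters delayAwareRead():
 *
 *   delayAwareRead(conn, buf, len, cb)
 *     -> bytesWanted() == 0 -> mem_obj->delayRead(DeferredRead(DeferReader, ...))
 *     ... a client consumes data, more bytes become wanted ...
 *     -> DeferReader(this, read) -> delayAwareRead(conn, buf, len, cb)
 *     -> bytesWanted() > 0 -> comm_read(conn, buf, amountToRead, cb)
 */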

size_t
StoreEntry::bytesWanted(Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

void
StoreEntry::setNoDelay(bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}

// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// STORE_MEM_CLIENT covers all other cases, including in-memory entries,
// newly created entries, and entries not backed by disk or memory cache.
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (swapoutFailed())
        return STORE_MEM_CLIENT;

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swappedOut()) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * Otherwise, make subsequent clients read from disk so they
     * cannot delay the first, and vice versa.
     */
    return STORE_DISK_CLIENT;
}
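
/* A quick summary of the decision tree above (derived from the code, not
 * authoritative):
 *
 *   inmem_lo != 0 (memory got nibbled)      -> STORE_DISK_CLIENT
 *   ENTRY_ABORTED or swapout failed         -> STORE_MEM_CLIENT
 *   STORE_OK, fully available in memory     -> STORE_MEM_CLIENT
 *   STORE_OK otherwise                      -> STORE_DISK_CLIENT
 *   STORE_PENDING, first client             -> STORE_MEM_CLIENT
 *   STORE_PENDING, no swap file yet         -> STORE_MEM_CLIENT
 *   STORE_PENDING, later clients            -> STORE_DISK_CLIENT
 */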

StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}

#if USE_ADAPTATION
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif

void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, mem_obj << " in " << *this);

    // Store::Root() is FATALly missing during shutdown
    if (hasTransients() && !shutting_down)
        Store::Root().transientsDisconnect(*this);
    if (hasMemStore() && !shutting_down)
        Store::Root().memoryDisconnect(*this);

    if (auto memObj = mem_obj) {
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete memObj;
    }
}

void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    // Store::Root() is FATALly missing during shutdown
    if (e->hasDisk() && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == NULL);

    delete e;
}

/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */

void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    assert(!key);
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}

/* -------------------------------------------------------------------------- */

void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

void
StoreEntry::touch()
{
    lastref = squid_curtime;
}

void
StoreEntry::releaseRequest(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this);
    if (!shareable)
        shareableWhenPrivate = false; // may already be false
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;
    setPrivateKey(shareable, true);
}

int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    abandon(context);
    return 0;
}

/// keep the unlocked StoreEntry object in the local store_table (if needed) or
/// delete it (otherwise)
void
StoreEntry::doAbandon(const char *context)
{
    debugs(20, 5, *this << " via " << (context ? context : "somebody"));
    assert(!locked());
    assert(storePendingNClients(this) == 0);

    // Both aborted local writers and aborted local readers (of remote writers)
    // are STORE_PENDING, but aborted readers should never release().
    if (EBIT_TEST(flags, RELEASE_REQUEST) ||
            (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
        this->release();
        return;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
}

StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().find(storeKeyPublic(uri, method));
}

StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
}

StoreEntry *
storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);

    return e;
}

static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}

/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
void
StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
    debugs(20, 3, shareable << permanent << ' ' << *this);
    if (permanent)
        EBIT_SET(flags, RELEASE_REQUEST); // may already be set
    if (!shareable)
        shareableWhenPrivate = false; // may already be false

    if (EBIT_TEST(flags, KEY_PRIVATE))
        return;

    if (key) {
        Store::Root().evictCached(*this); // all caches/workers will know
        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}

bool
StoreEntry::setPublicKey(const KeyScope scope)
{
    debugs(20, 3, *this);
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return true; // already public

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    try {
        EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
        const cache_key *pubKey = calcPublicKey(scope);
        Store::Root().addWriting(this, pubKey);
        forcePublicKey(pubKey);
        newVaryMarker.unlockAndReset("setPublicKey+success");
        return true;
    } catch (const std::exception &ex) {
        debugs(20, 2, "for " << *this << " failed: " << ex.what());
    }
    return false;
}

void
StoreEntry::clearPublicKeyScope()
{
    if (!key || EBIT_TEST(flags, KEY_PRIVATE))
        return; // probably the old public key was deleted or made private

    // TODO: adjustVary() when collapsed revalidation supports that

    const cache_key *newKey = calcPublicKey(ksDefault);
    if (!storeKeyHashCmp(key, newKey))
        return; // probably another collapsed revalidation beat us to this change

    forcePublicKey(newKey);
}

/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    debugs(20, 3, storeKeyText(newkey) << " for " << *this);
    assert(mem_obj);

    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "releasing clashing " << *e2);
        e2->release(true);
    }

    if (key)
        hashDelete();

    clearPrivate();

    assert(mem_obj->hasUris());
    hashInsert(newkey);

    if (hasDisk())
        storeDirSwapLog(this, SWAP_LOG_ADD);
}

/// Calculates correct public key for feeding forcePublicKey().
/// Assumes adjustVary() has been called for this entry already.
const cache_key *
StoreEntry::calcPublicKey(const KeyScope keyScope)
{
    assert(mem_obj);
    return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
           storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
}

/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
/// \throws std::exception on failures
StoreEntry *
StoreEntry::adjustVary()
{
    assert(mem_obj);

    if (!mem_obj->request)
        return nullptr;

    HttpRequestPointer request(mem_obj->request);
    const auto &reply = mem_obj->freshestReply();

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release(true);
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), &reply);
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        // XXX: storeCreateEntry() already tries to make `pe` public under
        // certain conditions. If those conditions do not apply to Vary markers,
        // then refactor to call storeCreatePureEntry() above. Otherwise,
        // refactor to simply check whether `pe` is already public below.
        if (!pe->makePublic()) {
            pe->unlock("StoreEntry::adjustVary+failed_makePublic");
            throw TexcHere("failed to make Vary marker public");
        }
        /* We are allowed to do this typecast */
        const HttpReplyPointer rep(new HttpReply);
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        auto vary = reply.header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = reply.header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until timestampsSet()

        pe->timestampsSet();

        pe->startWriting(); // after timestampsSet()

        pe->complete();

        return pe;
    }
    return nullptr;
}

StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->createMemObject(url, log_url, method);

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}

StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    e->setPrivateKey(false, !flags.cachable);
    return e;
}
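
/* A typical creation sequence, sketched (variable names are illustrative
 * only): a server-side caller builds an entry, writes the reply into it,
 * and completes it:
 *
 *   StoreEntry *e = storeCreateEntry(url, logUrl, request->flags, request->method);
 *   e->replaceHttpReply(reply);    // by default also starts writing headers
 *   e->append(bodyData, bodyLen);  // possibly many times
 *   e->complete();
 *   e->unlock("caller");           // storeCreateEntry() returned a locked entry
 */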

/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

void
StoreEntry::write(StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    writeBuffer.offset += mem_obj->baseReply().hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
        debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
        EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
    }

    invokeHandlers();
}

/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - mem_obj->baseReply().hdr_sz;
    write(tempBuffer);
}

void
StoreEntry::vappendf(const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    *buf = 0;
    int x;

    va_list ap;
    /* Fix of bug 753r. The value of vargs is undefined
     * after vsnprintf() returns. Make a copy of vargs
     * in case we loop around and call vsnprintf() again.
     */
    va_copy(ap, vargs);
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
    va_end(ap);

    if (x < static_cast<int>(sizeof(buf))) {
        append(buf, x);
        return;
    }

    // okay, do it the slow way.
    char *buf2 = new char[x + 1];
    int y = vsnprintf(buf2, x + 1, fmt, vargs);
    assert(y >= 0 && y == x);
    append(buf2, y);
    delete[] buf2;
}

// Deprecated. Use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}

// Deprecated. Use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
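
/* For example, new code should prefer the member form over the deprecated
 * free functions above (a minimal sketch; `entry` is any live StoreEntry
 * and hitCount is a hypothetical counter):
 *
 *   entry->appendf("hits\t%d\n", hitCount);
 *
 * instead of:
 *
 *   storeAppendPrintf(entry, "hits\t%d\n", hitCount);
 */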

struct _store_check_cachable_hist {

    struct {
        int not_entry_cachable;
        int wrong_content_length;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;

int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}
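
/* Config.max_open_disk_fds comes from the squid.conf max_open_disk_fds
 * directive; a value of 0 (the default) disables this safety limit.
 * A hypothetical configuration capping concurrent disk file use:
 *
 *   max_open_disk_fds 100
 */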

int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;

    const auto clen = mem().baseReply().content_length;
    if (clen >= 0 && clen < Config.Store.minObjectSize)
        return 1;
    return 0;
}

bool
StoreEntry::checkTooBig() const
{
    if (mem_obj->endOffset() > store_maxobjsize)
        return true;

    const auto clen = mem_obj->baseReply().content_length;
    return (clen >= 0 && clen > store_maxobjsize);
}

// TODO: move "too many open..." checks outside -- we are called too early/late
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

    if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
        ++store_check_cachable_hist.no.wrong_content_length;
    } else if (!mem_obj) {
        // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
        // this segfault protection, but how can we get such a HIT?
        debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
        ++store_check_cachable_hist.no.missing_parts;
    } else if (checkTooBig()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
        ++store_check_cachable_hist.no.too_big;
    } else if (checkTooSmall()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
        ++store_check_cachable_hist.no.too_small;
    } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
        debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
        ++store_check_cachable_hist.no.private_key;
    } else if (hasDisk()) {
        /*
         * the remaining cases are only relevant if we haven't
         * started swapping out the object yet.
         */
        return 1;
    } else if (storeTooManyDiskFilesOpen()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
        ++store_check_cachable_hist.no.too_many_open_files;
    } else if (fdNFree() < RESERVED_FD) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
        ++store_check_cachable_hist.no.too_many_open_fds;
    } else {
        ++store_check_cachable_hist.yes.Default;
        return 1;
    }

    releaseRequest();
    return 0;
}

void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");
    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      0); // TODO: Remove this backward compatibility hack.
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}

void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}

void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    // To preserve forwarding retries, call FwdState::complete() instead.
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}

/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    // allow the Store clients to be told about the problem
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    if (mem_obj->abortCallback) {
        ScheduleCallHere(mem_obj->abortCallback);
        mem_obj->abortCallback = nullptr;
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}

/**
 * Clear Memory storage to accommodate the given object len
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    if (!shutting_down) // Store::Root() is FATALly missing during shutdown
        Store::Root().freeMemorySpace(size);
    PROF_stop(storeGetMemSpace);
}

/* Thunk through to Store::Root().maintain(). This would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic -- bigger fish to fry first. Long term, each store will
 * self-register when it becomes active.
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
}

/* The maximum objects to scan for maintain storage space */
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64

void
StoreEntry::release(const bool shareable)
{
    PROF_start(storeRelease);
    debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
    /* If, for any reason, we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        releaseRequest(shareable);
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        releaseRequest(shareable);
        LateReleaseStack.push(this);
        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    Store::Root().evictCached(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}

static void
storeLateRelease(void *)
{
    StoreEntry *e;
    static int n = 0;

    if (Store::Controller::store_dirs_rebuilding) {
        eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
        return;
    }

    // TODO: this works but looks inelegant.
    for (int i = 0; i < 10; ++i) {
        if (LateReleaseStack.empty()) {
            debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
            return;
        } else {
            e = LateReleaseStack.top();
            LateReleaseStack.pop();
        }

        e->unlock("storeLateRelease");
        ++n;
    }

    eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
}

/// whether the base response has all the body bytes we expect
/// \returns true for responses with unknown/unspecified body length
/// \returns true for responses with the right number of accumulated body bytes
bool
StoreEntry::validLength() const
{
    int64_t diff;
    assert(mem_obj != NULL);
    const auto reply = &mem_obj->baseReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength: object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") << "; '" << getMD5Text() << "'" );

    return 0;
}

static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
    Mgr::RegisterAction("store_queues", "SMP Transients and Caching Queues", StatQueues, 0, 1);
}
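
/* These reports are exposed through the cache manager interface. For
 * example (a sketch; exact tooling and output depend on the local setup),
 * an administrator could fetch the queues report with:
 *
 *   squidclient mgr:store_queues
 *
 * or through cachemgr.cgi using the "store_queues" action name.
 */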

void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}

void
storeConfigure(void)
{
    Store::Root().configure();
}

bool
StoreEntry::memoryCachable()
{
    if (!checkCachable())
        return 0;

    if (mem_obj == NULL)
        return 0;

    if (mem_obj->data_hdr.size() == 0)
        return 0;

    if (mem_obj->inmem_lo != 0)
        return 0;

    if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
        return 0;

    return 1;
}

int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}

/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In the absence of proper expiry info, the entry is set to expire
 * immediately; with HTTP violations enabled, the configured negative
 * TTL is observed instead.
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    if (expires > squid_curtime) {
        EBIT_SET(flags, ENTRY_NEGCACHED);
        debugs(20, 6, "expires = " << expires << " +" << (expires-squid_curtime) << ' ' << *this);
    }
}
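
/* Config.negativeTtl maps to the squid.conf negative_ttl directive (as the
 * #if above shows, it only takes effect in builds with HTTP violations
 * enabled). A hypothetical configuration caching errors for one minute:
 *
 *   negative_ttl 1 minute
 */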

void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}

int
expiresMoreThan(time_t expires, time_t when)
{
    if (expires < 0) /* No Expires given */
        return 1;

    return (expires > (squid_curtime + when));
}

int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (hasDisk()) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //     return 1;

    return 1;
}

bool
StoreEntry::timestampsSet()
{
    debugs(20, 7, *this << " had " << describeTimestamps());

    // TODO: Remove change-reducing "&" before the official commit.
    const auto reply = &mem().freshestReply();

    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DO NOT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime))
            served_date -= responseTime.tv_sec;
    }

    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    debugs(20, 5, *this << " has " << describeTimestamps());
    return true;
}
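
/* A worked example of the Age compensation above (numbers are illustrative):
 * suppose squid_curtime = 1000, the reply carries Date: 990 (as a raw
 * timestamp) and Age: 60. The apparent age is 1000 - 990 = 10 seconds,
 * less than the Age header claims, so the origin clock is likely ahead of
 * ours; since 60 > 10 and 1000 > 60, served_date becomes 1000 - 60 = 940.
 * If the reply also has Expires: 1290 (i.e., Date + 300), the entry expires
 * at 940 + 300 = 1240 in our clock terms.
 */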

bool
StoreEntry::updateOnNotModified(const StoreEntry &e304)
{
    assert(mem_obj);
    assert(e304.mem_obj);

    // update reply before calling timestampsSet() below
    const auto &oldReply = mem_obj->freshestReply();
    const auto updatedReply = oldReply.recreateOnNotModified(e304.mem_obj->baseReply());
    if (updatedReply) // HTTP 304 brought in new information
        mem_obj->updateReply(*updatedReply);
    // else continue to use the previous update, if any

    if (!timestampsSet() && !updatedReply)
        return false;

    // Keep the old mem_obj->vary_headers; see HttpHeader::skipUpdateHeader().

    debugs(20, 5, "updated basics in " << *this << " with " << e304);
    mem_obj->appliedUpdates = true; // helps in triage; may already be true
    return true;
}

void
StoreEntry::registerAbortCallback(const AsyncCall::Pointer &handler)
{
    assert(mem_obj);
    assert(!mem_obj->abortCallback);
    mem_obj->abortCallback = handler;
}

void
StoreEntry::unregisterAbortCallback(const char *reason)
{
    assert(mem_obj);
    if (mem_obj->abortCallback) {
        mem_obj->abortCallback->cancel(reason);
        mem_obj->abortCallback = nullptr;
    }
}

void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}

/*
 * NOTE: this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (MemStore::Enabled()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}

const char *
StoreEntry::url() const
{
    if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}

void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}

void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}

void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}

/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}

/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and invokes the handlers
 * to begin sending anything that may be buffered.
 */
void
StoreEntry::flush()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        EBIT_CLR(flags, DELAY_SENDING);
        invokeHandlers();
    }
}

void
StoreEntry::reset()
{
    debugs(20, 3, url());
    mem().reset();
    expires = lastModified_ = timestamp = -1;
}

/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once it's working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}

/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
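
/* For example, a removal policy module's setup hook registers itself here
 * (a sketch; createMyPolicy is a hypothetical REMOVALPOLICYCREATE factory):
 *
 *   void
 *   storeReplMyPolicySetup(void)
 *   {
 *       storeReplAdd("mypolicy", createMyPolicy);
 *   }
 */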

/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return NULL; /* NOTREACHED */
}
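
/* The settings above come from squid.conf. A minimal sketch of the two
 * related directives (the policy name must match a registered repl type):
 *
 *   memory_replacement_policy lru
 *   cache_replacement_policy heap GDSF
 */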

void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(HttpReplyPointer(reply));
    flush();
    complete();
    negativeCache();
    releaseRequest(false); // if it is safe to negatively cache, sharing is OK
    unlock("StoreEntry::storeErrorResponse");
}

/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(const HttpReplyPointer &rep, const bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceBaseReply(rep);

    if (andStartWriting)
        startWriting();
}

void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    assert(isEmpty());
    assert(mem_obj);

    // Per MemObject replies definitions, we can only write our base reply.
    // Currently, all callers call replaceHttpReply() first, so there is no
    // updated reply here anyway. Eventually, we may need to support the
    // updateOnNotModified(), startWriting() sequence as well.
    assert(!mem_obj->updatedReply());
    const auto rep = &mem_obj->baseReply();

    buffer();
    rep->packHeadersUsingSlowPacker(*this);
    mem_obj->markEndOfReplyHeaders();

    rep->body.packInto(this);
    flush();

    // The entry headers are written; new clients
    // should not collapse anymore.
    if (hittingRequiresCollapsing()) {
        setCollapsingRequirement(false);
        Store::Root().transientsClearCollapsingRequirement(*this);
    }
}

char const *
StoreEntry::getSerialisedMetaData(size_t &length) const
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert(swap_hdr_sz >= 0);
    length = static_cast<size_t>(swap_hdr_sz);
    return result;
}

/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !Store::Root().transientsReader(*this) && // this worker is responsible
            hasTransients() && // other workers may be interested
            !hasMemStore() && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().stopSharing(*this);
    }
}

void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}

void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}

void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}

bool
StoreEntry::modifiedSince(const time_t ims, const int imslen) const
{
    const time_t mod_time = lastModified();

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    assert(imslen < 0); // TODO: Either remove imslen or support it properly.

    if (mod_time > ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else {
        debugs(88, 3, "--> NO: same LMT");
        return false;
    }
}

bool
StoreEntry::hasEtag(ETag &etag) const
{
    if (const auto reply = hasFreshestReply()) {
        etag = reply->header.getETag(Http::HdrType::ETAG);
        if (etag.str)
            return true;
    }
    return false;
}

bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}

bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}

/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const auto repETag = mem().freshestReply().header.getETag(Http::HdrType::ETAG);
    if (!repETag.str) {
        static SBuf asterisk("*", 1);
        return strListIsMember(&reqETags, asterisk, ',');
    }

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
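
/* For example (illustrative request/response pairs, not tied to any test):
 * with a cached reply carrying ETag: "v2",
 *
 *   If-None-Match: "v1", "v2"   -> matches (strong comparison)
 *   If-None-Match: W/"v2"       -> matches only when weak comparison is
 *                                  allowed (plain GET or HEAD requests)
 *   If-None-Match: *            -> always matches
 *
 * With no ETag in the reply, only a literal "*" in the request list matches.
 */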

Store::Disk &
StoreEntry::disk() const
{
    assert(hasDisk());
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}

bool
StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
{
    checkDisk();
    if (dirn < 0 && filen < 0)
        return swap_dirn >= 0;
    Must(dirn >= 0);
    const bool matchingDisk = (swap_dirn == dirn);
    return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
}

void
StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
{
    debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
           swapStatusStr[status] << " " << dirn << " " <<
           std::hex << std::setw(8) << std::setfill('0') <<
           std::uppercase << fno);
    checkDisk();
    swap_dirn = dirn;
    swap_filen = fno;
    swap_status = status;
    checkDisk();
}

void
StoreEntry::detachFromDisk()
{
    swap_dirn = -1;
    swap_filen = -1;
    swap_status = SWAPOUT_NONE;
}

void
StoreEntry::checkDisk() const
{
    try {
        if (swap_dirn < 0) {
            Must(swap_filen < 0);
            Must(swap_status == SWAPOUT_NONE);
        } else {
            Must(swap_filen >= 0);
            Must(swap_dirn < Config.cacheSwap.n_configured);
            if (swapoutFailed()) {
                Must(EBIT_TEST(flags, RELEASE_REQUEST));
            } else {
                Must(swappingOut() || swappedOut());
            }
        }
    } catch (...) {
        debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " <<
               *this << "; problem: " << CurrentException);
        throw;
    }
}

/*
 * return true if the entry is in a state where
 * it can accept more data (i.e., via the write() method)
 */
bool
StoreEntry::isAccepting() const
{
    if (STORE_PENDING != store_status)
        return false;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return false;

    return true;
}

const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}

void
StoreEntry::setCollapsingRequirement(const bool required)
{
    if (required)
        EBIT_SET(flags, ENTRY_REQUIRES_COLLAPSING);
    else
        EBIT_CLR(flags, ENTRY_REQUIRES_COLLAPSING);
}

static std::ostream &
operator <<(std::ostream &os, const Store::IoStatus &io)
{
    switch (io) {
    case Store::ioUndecided:
        os << 'u';
        break;
    case Store::ioReading:
        os << 'r';
        break;
    case Store::ioWriting:
        os << 'w';
        break;
    case Store::ioDone:
        os << 'o';
        break;
    }
    return os;
}

std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.hasTransients()) {
        const auto &xitTable = e.mem_obj->xitTable;
        os << 't' << xitTable.io << xitTable.index;
    }

    if (e.hasMemStore()) {
        const auto &memCache = e.mem_obj->memCache;
        os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
    }

    // Do not use e.hasDisk() here because its checkDisk() call may call us.
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
            os << 'I';
            if (e.shareableWhenPrivate)
                os << 'H';
        }
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
        if (EBIT_TEST(e.flags, ENTRY_REQUIRES_COLLAPSING)) os << 'C';
    }

    return os << '/' << &e << '*' << e.locks();
}
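
/* This printer backs the many `debugs(... << *entry)` statements in this
 * file. A hypothetical rendering for a private, negatively cached entry
 * that is marked for release and holds one lock (and has default statuses,
 * so no status letters appear) might look like:
 *
 *   e:=XIN/0x55f3c2a14b60*1
 *
 * (flag letters, then the object address and lock count; the address is
 * illustrative only).
 */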

void
Store::EntryGuard::onException() noexcept
{
    SWALLOW_EXCEPTIONS({
        entry_->releaseRequest(false);
        entry_->unlock(context_);
    });
}