/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "comm/Connection.h"
#include "comm/Read.h"
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store/Controller.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
 * XXX: convert to MEMPROXY_CLASS() API
 */
#include "mem/Pool.h"

#include <climits>
#include <stack>

#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

/** \todo Convert these string constants to generated enum string-arrays */

const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};

/*
 * This defines a removal policy (repl) type
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
StoreEntry::operator new (size_t bytecount)
{
    assert(bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
    }

    return pool->alloc();
}

void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
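
/* For reference: a minimal sketch of the MEMPROXY_CLASS() conversion that
 * the XXX comments above request (not active code; blocked on bugs 4370,
 * 4354 and 4355). The macro, from mem/AllocatorProxy.h, declares pooled
 * operators in-class:
 *
 *   class StoreEntry ... {
 *       MEMPROXY_CLASS(StoreEntry);   // pooled operator new/delete
 *       ...
 *   };
 *
 * The explicit pool member and the hand-written operators above would then
 * go away; in-use accounting like inUseCount() moves behind the proxy.
 */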

void
StoreEntry::makePublic(const KeyScope scope)
{
    /* This object can be cached for a long time */
    if (!EBIT_TEST(flags, RELEASE_REQUEST))
        setPublicKey(scope);
}

void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest(); /* delete object when not used */
}

void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();
    makePublic();
}

size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}

#include "comm.h"

void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}

void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "will not read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}

size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

void
StoreEntry::setNoDelay(bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}

// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
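
/* Informal summary of the decision above, derived from the code:
 *
 *   inmem_lo != 0 (head already trimmed)     -> DISK client
 *   entry aborted                            -> MEM client (with a warning)
 *   STORE_OK, object fully present in memory -> MEM client
 *   STORE_OK otherwise                       -> DISK client
 *   STORE_PENDING, first client              -> MEM client
 *   STORE_PENDING, no swap file yet          -> MEM client
 *   STORE_PENDING otherwise                  -> DISK client
 */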

StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}

#if USE_ADAPTATION
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif

void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete mem;
    }
}

void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == NULL);

    delete e;
}

/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */

void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}

/* -------------------------------------------------------------------------- */

/* get rid of memory copy of the object */
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    if (swap_status != SWAPOUT_DONE)
        release();
}

void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

void
StoreEntry::touch()
{
    lastref = squid_curtime;
}

void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}

void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}

int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}

void
StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequestMethod( request, method);

    if (!result)
        aClient->created (NullStoreEntry::getInstance());
    else
        aClient->created (result);
}

void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}

StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method, keyScope));
}

StoreEntry *
storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);

    return e;
}
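
/* A minimal lookup sketch (hypothetical caller code, not part of this file):
 *
 *   if (StoreEntry *hit = storeGetPublic("http://example.com/", Http::METHOD_GET)) {
 *       hit->lock("exampleLookup");     // keep the entry while using it
 *       ...
 *       hit->unlock("exampleLookup");   // may release or idle the entry
 *   }
 *
 * The public key is an MD5 digest that storeKeyPublic() computes over the
 * request method and the store ID/URI (see store_key_md5.cc).
 */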

static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}

/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over it.
 * Otherwise, we need a 'remove from the index but not the store'
 * concept.
 */
void
StoreEntry::setPrivateKey()
{
    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}

void
StoreEntry::setPublicKey(const KeyScope scope)
{
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    adjustVary();
    forcePublicKey(calcPublicKey(scope));
}

void
StoreEntry::clearPublicKeyScope()
{
    if (!key || EBIT_TEST(flags, KEY_PRIVATE))
        return; // probably the old public key was deleted or made private

    // TODO: adjustVary() when collapsed revalidation supports that

    const cache_key *newKey = calcPublicKey(ksDefault);
    if (!storeKeyHashCmp(key, newKey))
        return; // probably another collapsed revalidation beat us to this change

    forcePublicKey(newKey);
}

/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}

/// Calculates the correct public key for feeding forcePublicKey().
/// Assumes adjustVary() has been called for this entry already.
const cache_key *
StoreEntry::calcPublicKey(const KeyScope keyScope)
{
    assert(mem_obj);
    return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
           storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
}

/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
void
StoreEntry::adjustVary()
{
    assert(mem_obj);

    if (!mem_obj->request)
        return;

    HttpRequestPointer request(mem_obj->request);

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release();
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), mem_obj->getReply().getRaw());
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        String vary;
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        /* We are allowed to do this typecast */
        HttpReply *rep = new HttpReply;
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until key is public

        pe->timestampsSet();

        pe->makePublic();

        pe->startWriting(); // after makePublic()

        pe->complete();

        pe->unlock("StoreEntry::forcePublicKey+Vary");
    }
}

StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->makeMemObject();
    e->mem_obj->setUris(url, log_url, method);

    if (flags.cachable) {
        EBIT_CLR(e->flags, RELEASE_REQUEST);
    } else {
        e->releaseRequest();
    }

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}

StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
    e->lock("storeCreateEntry");

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    return e;
}

/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReplyPointer reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}

/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}

void
StoreEntry::vappendf(const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    *buf = 0;
    int x;

#ifdef VA_COPY
    va_list ap;
    /* Fix of bug 753r. The value of vargs is undefined
     * after vsnprintf() returns. Make a copy of vargs
     * in case we loop around and call vsnprintf() again.
     */
    VA_COPY(ap,vargs);
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
    va_end(ap);
#else /* VA_COPY */
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
#endif /* VA_COPY */

    if (x < static_cast<int>(sizeof(buf))) {
        append(buf, x);
        return;
    }

    // okay, do it the slow way.
    char *buf2 = new char[x+1];
    int y = vsnprintf(buf2, x+1, fmt, vargs);
    assert(y >= 0 && y == x);
    append(buf2, y);
    delete[] buf2;
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
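
/* A minimal usage sketch (hypothetical report generator; the cache manager
 * actions in this file, e.g. storeCheckCachableStats() below, are the real
 * callers):
 *
 *   static void
 *   exampleReport(StoreEntry *sentry)
 *   {
 *       storeAppendPrintf(sentry, "objects in use\t%d\n",
 *                         static_cast<int>(StoreEntry::inUseCount()));
 *   }
 *
 * New code should prefer the StoreEntry::appendf() wrapper mentioned above.
 */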

struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;

int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}

int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}

bool
StoreEntry::checkTooBig() const
{
    if (mem_obj->endOffset() > store_maxobjsize)
        return true;

    if (getReply()->content_length < 0)
        return false;

    return (getReply()->content_length > store_maxobjsize);
}

// TODO: move "too many open..." checks outside -- we are called too early/late
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}

void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}

void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}

void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. Do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}

/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT, HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}

/**
 * Clear Memory storage to accommodate the given object length
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released << " were released");
    PROF_stop(storeGetMemSpace);
}

/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term, each store, when
 * it becomes active, will self-register.
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}

/* The maximum objects to scan for maintain storage space */
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64

/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason, we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && swap_filen > -1) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        Store::Root().memoryUnlink(*this);

        setPrivateKey();

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        setReleaseFlag();
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    if (swap_filen > -1 && !EBIT_TEST(flags, KEY_PRIVATE)) {
        // log before unlink() below clears swap_filen
        storeDirSwapLog(this, SWAP_LOG_DEL);
    }

    Store::Root().unlink(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}

static void
storeLateRelease(void *)
{
    StoreEntry *e;
    static int n = 0;

    if (Store::Controller::store_dirs_rebuilding) {
        eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
        return;
    }

    // TODO: this works but looks inelegant.
    for (int i = 0; i < 10; ++i) {
        if (LateReleaseStack.empty()) {
            debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
            return;
        } else {
            e = LateReleaseStack.top();
            LateReleaseStack.pop();
        }

        e->unlock("storeLateRelease");
        ++n;
    }

    eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
}

/* return 1 if a store entry is locked */
int
StoreEntry::locked() const
{
    if (lock_count)
        return 1;

    /*
     * SPECIAL, PUBLIC entries should be "locked";
     * XXX: Their owner should lock them then instead of relying on this hack.
     */
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            return 1;

    return 0;
}

bool
StoreEntry::validLength() const
{
    int64_t diff;
    const HttpReply *reply;
    assert(mem_obj != NULL);
    reply = getReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength:     object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength:         hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") << "; '" << getMD5Text() << "'");

    return 0;
}

static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}

void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}

void
storeConfigure(void)
{
    Store::Root().updateLimits();
}

bool
StoreEntry::memoryCachable()
{
    if (!checkCachable())
        return 0;

    if (mem_obj == NULL)
        return 0;

    if (mem_obj->data_hdr.size() == 0)
        return 0;

    if (mem_obj->inmem_lo != 0)
        return 0;

    if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
        return 0;

    return 1;
}

int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}

/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In the absence of proper expiry info it is set to expire immediately;
 * with HTTP violations enabled, the configured negative TTL is observed instead.
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
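
/* For example (a squid.conf sketch; negative_ttl is only honoured in builds
 * where HTTP violations are enabled, matching the #if above):
 *
 *   # cache failed lookups (e.g. 404s) for one minute
 *   negative_ttl 1 minute
 *
 * With a negative TTL of 0 seconds, ENTRY_NEGCACHED entries expire
 * immediately, matching the non-violations branch above.
 */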

void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}

int
expiresMoreThan(time_t expires, time_t when)
{
    if (expires < 0) /* No Expires given */
        return 1;

    return (expires > (squid_curtime + when));
}

int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing, so that store_client constructor will assert. XXX:
    // This is wrong for range requests (that could feed off nibbled memory)
    // and for entries backed by the shared memory cache (that could, in
    // theory, get nibbled bytes from that cache, but there is no such
    // "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //     return 1;

    return 1;
}

bool
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC 2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with the Age header if the origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DON'T compensate if the age value is larger than
     * squid_curtime because that results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    return true;
}
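
/* A worked example of the arithmetic above (hypothetical numbers): assume
 * squid_curtime = 1000 and a reply carrying Date: 940 and Age: 100. Since
 * age (100) exceeds squid_curtime - served_date (60), an upstream cache
 * reports the object as older than the clock difference suggests, so
 * served_date becomes squid_curtime - age = 900. With Expires: 1240 and
 * Date: 940, exp = served_date + (1240 - 940) = 1200, i.e. the entry keeps
 * its 300-second freshness lifetime relative to the corrected timestamp.
 */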

void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}

void
StoreEntry::unregisterAbort()
{
    assert(mem_obj);
    if (mem_obj->abort.callback) {
        mem_obj->abort.callback = NULL;
        cbdataReferenceDone(mem_obj->abort.data);
    }
}

void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}

/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}

const char *
StoreEntry::url() const
{
    if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}

MemObject *
StoreEntry::makeMemObject()
{
    if (!mem_obj)
        mem_obj = new MemObject();
    return mem_obj;
}

void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    makeMemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}

/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}

/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and invokes the handlers
 * to begin sending anything that may be buffered.
 */
void
StoreEntry::flush()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        EBIT_CLR(flags, DELAY_SENDING);
        invokeHandlers();
    }
}

int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}

int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}

HttpReply const *
StoreEntry::getReply() const
{
    return (mem_obj ? mem_obj->getReply().getRaw() : nullptr);
}

void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, url());
    mem_obj->reset();
    expires = lastModified_ = timestamp = -1;
}

/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once it's working).
 */
void
storeFsInit(void)
{
    storeReplSetup();
}

/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
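
/* A minimal registration sketch (mirrors what the repl/ policy modules do
 * when storeReplSetup() runs; "lru" with createRemovalPolicy_lru is the
 * stock example):
 *
 *   // in a policy module's setup hook:
 *   // storeReplAdd("lru", createRemovalPolicy_lru);
 *
 * After registration, createRemovalPolicy() below can instantiate the policy
 * named by cache_replacement_policy / memory_replacement_policy in squid.conf.
 */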

/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR:   and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return NULL; /* NOTREACHED */
}

#if 0
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif

void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest();
    unlock("StoreEntry::storeErrorResponse");
}

/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceReply(HttpReplyPointer(rep));

    if (andStartWriting)
        startWriting();
}

void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    buffer();
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
    flush();
}

char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}

/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
            mem_obj->xitTable.index >= 0 && // other workers may be interested
            mem_obj->memCache.index < 0 && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}

void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}

void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}

void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go of any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}

bool
StoreEntry::modifiedSince(const time_t ims, const int imslen) const
{
    int object_length;
    const time_t mod_time = lastModified();

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}
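
/* Example of the If-Modified-Since decision above (hypothetical request):
 *
 *   GET /logo.png HTTP/1.1
 *   If-Modified-Since: Mon, 01 May 2017 10:00:00 GMT
 *
 * If the entry's last-modified time equals that date, modifiedSince()
 * returns false and the caller can answer 304 Not Modified, unless the
 * client also supplied a length hint (imslen) that differs from the stored
 * object's length, in which case the full object is considered modified.
 */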

bool
StoreEntry::hasEtag(ETag &etag) const
{
    if (const HttpReply *reply = getReply()) {
        etag = reply->header.getETag(Http::HdrType::ETAG);
        if (etag.str)
            return true;
    }
    return false;
}

bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}

bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}

/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
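
/* Example of the matching rules above (hypothetical headers): with a stored
 * reply carrying ETag: W/"v1",
 *
 *   If-None-Match: W/"v1", "v2"   -> matches for GET/HEAD (weak comparison)
 *   If-Match: W/"v1"              -> never matches (strong comparison only)
 *   If-None-Match: *              -> matches any entity that has an ETag
 *
 * following the weak/strong comparison rules of RFC 7232 section 2.3.2.
 */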

Store::Disk &
StoreEntry::disk() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}

/*
 * return true if the entry is in a state where
 * it can accept more data (i.e., with the write() method)
 */
bool
StoreEntry::isAccepting() const
{
    if (STORE_PENDING != store_status)
        return false;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return false;

    return true;
}

const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}

std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}

/* NullStoreEntry */

NullStoreEntry NullStoreEntry::_instance;

NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}

char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}

void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}

char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}