]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Fix formatting errors in d8165775
[thirdparty/squid.git] / src / store.cc
1 /*
2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "CacheDigest.h"
13 #include "CacheManager.h"
14 #include "comm/Connection.h"
15 #include "comm/Read.h"
16 #include "ETag.h"
17 #include "event.h"
18 #include "fde.h"
19 #include "globals.h"
20 #include "http.h"
21 #include "HttpReply.h"
22 #include "HttpRequest.h"
23 #include "mem_node.h"
24 #include "MemObject.h"
25 #include "mgr/Registration.h"
26 #include "mgr/StoreIoAction.h"
27 #include "profiler/Profiler.h"
28 #include "repl_modules.h"
29 #include "RequestFlags.h"
30 #include "SquidConfig.h"
31 #include "SquidTime.h"
32 #include "StatCounters.h"
33 #include "stmem.h"
34 #include "Store.h"
35 #include "store/Controller.h"
36 #include "store/Disk.h"
37 #include "store/Disks.h"
38 #include "store_digest.h"
39 #include "store_key_md5.h"
40 #include "store_log.h"
41 #include "store_rebuild.h"
42 #include "StoreClient.h"
43 #include "StoreIOState.h"
44 #include "StoreMeta.h"
45 #include "StrList.h"
46 #include "swap_log_op.h"
47 #include "tools.h"
48 #if USE_DELAY_POOLS
49 #include "DelayPools.h"
50 #endif
51
52 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
53 * XXX: convert to MEMPROXY_CLASS() API
54 */
55 #include "mem/Pool.h"
56
57 #include <climits>
58 #include <stack>
59
60 #define REBUILD_TIMESTAMP_DELTA_MAX 2
61
62 #define STORE_IN_MEM_BUCKETS (229)
63
64 /** \todo Convert these string constants to enum string-arrays generated */
65
/// printable names for mem_status_t values (indexed by enum value)
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

/// printable names for ping_status_t values (indexed by enum value)
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

/// printable names for store_status_t values (indexed by enum value)
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

/// printable names for swap_status_t values (indexed by enum value)
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
87
88 /*
89 * This defines an repl type
90 */
91
/*
 * This defines an repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

/// registry record pairing a removal-policy type name with its factory
struct _storerepl_entry {
    const char *typestr;         // policy type name
    REMOVALPOLICYCREATE *create; // factory producing a policy instance
};

// table of registered replacement policies; NULL until policies register
static storerepl_entry_t *storerepl_list = NULL;
100
/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
// entries queued for deferred release (drained by storeLateRelease)
static std::stack<StoreEntry*> LateReleaseStack;
// allocation pool backing StoreEntry::operator new/delete
MemAllocator *StoreEntry::pool = NULL;
113
114 void
115 Store::Stats(StoreEntry * output)
116 {
117 assert(output);
118 Root().stat(*output);
119 }
120
121 // XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
122 // definitions but doing so exposes bug 4370, and maybe 4354 and 4355
123 void *
124 StoreEntry::operator new (size_t bytecount)
125 {
126 assert(bytecount == sizeof (StoreEntry));
127
128 if (!pool) {
129 pool = memPoolCreate ("StoreEntry", bytecount);
130 }
131
132 return pool->alloc();
133 }
134
void
StoreEntry::operator delete (void *address)
{
    // return storage to the pool created by operator new; a delete
    // implies a prior successful new, so the pool must exist
    pool->freeOne(address);
}
140
141 void
142 StoreEntry::makePublic(const KeyScope scope)
143 {
144 /* This object can be cached for a long time */
145 if (!EBIT_TEST(flags, RELEASE_REQUEST))
146 setPublicKey(scope);
147 }
148
149 void
150 StoreEntry::makePrivate(const bool shareable)
151 {
152 /* This object should never be cached at all */
153 expireNow();
154 releaseRequest(shareable); /* delete object when not used */
155 }
156
157 void
158 StoreEntry::clearPrivate()
159 {
160 EBIT_CLR(flags, KEY_PRIVATE);
161 shareableWhenPrivate = false;
162 }
163
void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();
    // negatively cached replies are still shared under a public key
    makePublic();
}
171
172 size_t
173 StoreEntry::inUseCount()
174 {
175 if (!pool)
176 return 0;
177 return pool->getInUseCount();
178 }
179
180 const char *
181 StoreEntry::getMD5Text() const
182 {
183 return storeKeyText((const cache_key *)key);
184 }
185
186 #include "comm.h"
187
188 void
189 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
190 {
191 StoreEntry *anEntry = (StoreEntry *)theContext;
192 anEntry->delayAwareRead(aRead.conn,
193 aRead.buf,
194 aRead.len,
195 aRead.callback);
196 }
197
198 void
199 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
200 {
201 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
202 /* sketch: readdeferer* = getdeferer.
203 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
204 */
205
206 if (amountToRead <= 0) {
207 assert (mem_obj);
208 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
209 return;
210 }
211
212 if (fd_table[conn->fd].closing()) {
213 // Readers must have closing callbacks if they want to be notified. No
214 // readers appeared to care around 2009/12/14 as they skipped reading
215 // for other reasons. Closing may already be true at the delyaAwareRead
216 // call time or may happen while we wait after delayRead() above.
217 debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
218 callback);
219 return; // the read callback will never be called
220 }
221
222 comm_read(conn, buf, amountToRead, callback);
223 }
224
225 size_t
226 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
227 {
228 if (mem_obj == NULL)
229 return aRange.end;
230
231 #if URL_CHECKSUM_DEBUG
232
233 mem_obj->checkUrlChecksum();
234
235 #endif
236
237 if (!mem_obj->readAheadPolicyCanRead())
238 return 0;
239
240 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
241 }
242
243 bool
244 StoreEntry::checkDeferRead(int) const
245 {
246 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
247 }
248
249 void
250 StoreEntry::setNoDelay(bool const newValue)
251 {
252 if (mem_obj)
253 mem_obj->setNoDelay(newValue);
254 }
255
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // bytes below inmem_lo were trimmed from memory; must come from disk
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
321
// Constructs a blank entry: no key, no MemObject, timestamps unset (-1),
// not in memory, not on disk, STORE_PENDING, and unlocked.
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}
342
StoreEntry::~StoreEntry()
{
    // MemObject and key teardown happens in destroyStoreEntry()/
    // destroyMemObject(), not here; the destructor only logs.
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
347
348 #if USE_ADAPTATION
349 void
350 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
351 {
352 if (!deferredProducer)
353 deferredProducer = producer;
354 else
355 debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
356 *deferredProducer << ", requested call: " << *producer);
357 }
358
359 void
360 StoreEntry::kickProducer()
361 {
362 if (deferredProducer != NULL) {
363 ScheduleCallHere(deferredProducer);
364 deferredProducer = NULL;
365 }
366 }
367 #endif
368
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        // NOTE(review): mem_obj is cleared *before* delete, presumably so
        // code reached during MemObject destruction never sees a dangling
        // pointer through this entry — confirm before reordering
        mem_obj = NULL;
        delete mem;
    }
}
386
/// hash-table-compatible deleter: fully tears down and deletes a StoreEntry
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    // the shared null-object singleton must never be destroyed
    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have freed and cleared the key
    assert(e->key == NULL);

    delete e;
}
409
410 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
411
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    // keep a private copy of the key; released in hashDelete()
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}
419
420 void
421 StoreEntry::hashDelete()
422 {
423 if (key) { // some test cases do not create keys and do not hashInsert()
424 hash_remove_link(store_table, this);
425 storeKeyFree((const cache_key *)key);
426 key = NULL;
427 }
428 }
429
430 /* -------------------------------------------------------------------------- */
431
/* get rid of memory copy of the object */
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    // without a complete disk copy, nothing remains to serve from
    if (swap_status != SWAPOUT_DONE)
        release();
}
446
void
StoreEntry::lock(const char *context)
{
    // simple reference count; the matching unlock() may destroy the entry
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}
453
void
StoreEntry::touch()
{
    // record a reference now, for replacement-policy purposes
    lastref = squid_curtime;
}
459
460 void
461 StoreEntry::setReleaseFlag()
462 {
463 if (EBIT_TEST(flags, RELEASE_REQUEST))
464 return;
465
466 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
467
468 EBIT_SET(flags, RELEASE_REQUEST);
469
470 Store::Root().markForUnlink(*this);
471 }
472
473 void
474 StoreEntry::releaseRequest(const bool shareable)
475 {
476 if (EBIT_TEST(flags, RELEASE_REQUEST))
477 return;
478
479 setReleaseFlag(); // makes validToSend() false, preventing future hits
480
481 setPrivateKey(shareable);
482 }
483
/// Drops one reference. When the last reference is gone, the entry is
/// either released (if so marked or still incomplete) or handed to the
/// store as idle. \returns the remaining lock count
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // a still-incomplete entry cannot satisfy future requests
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    // an idle entry should not still carry a private key
    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
511
512 void
513 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
514 {
515 assert (aClient);
516 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
517
518 if (!result)
519 aClient->created (NullStoreEntry::getInstance());
520 else
521 aClient->created (result);
522 }
523
524 void
525 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
526 {
527 assert (aClient);
528 StoreEntry *result = storeGetPublicByRequest (request);
529
530 if (!result)
531 result = NullStoreEntry::getInstance();
532
533 aClient->created (result);
534 }
535
536 void
537 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
538 {
539 assert (aClient);
540 StoreEntry *result = storeGetPublic (uri, method);
541
542 if (!result)
543 result = NullStoreEntry::getInstance();
544
545 aClient->created (result);
546 }
547
548 StoreEntry *
549 storeGetPublic(const char *uri, const HttpRequestMethod& method)
550 {
551 return Store::Root().get(storeKeyPublic(uri, method));
552 }
553
554 StoreEntry *
555 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
556 {
557 return Store::Root().get(storeKeyPublicByRequestMethod(req, method, keyScope));
558 }
559
560 StoreEntry *
561 storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
562 {
563 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);
564
565 if (e == NULL && req->method == Http::METHOD_HEAD)
566 /* We can generate a HEAD reply from a cached GET object */
567 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);
568
569 return e;
570 }
571
/// monotonically increasing id for private keys; restarts at 1 on overflow
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;
    if (key_counter < 0) // wrapped past INT_MAX
        key_counter = 1;

    return key_counter;
}
582
/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
/// Re-keys the entry with a fresh unique (private) key, dropping any
/// public key first; \p shareable controls whether the private entry
/// may still be shared.
void
StoreEntry::setPrivateKey(const bool shareable)
{
    if (key && EBIT_TEST(flags, KEY_PRIVATE)) {
        // The entry is already private, but it may be still shareable.
        if (!shareable)
            shareableWhenPrivate = false;
        return;
    }

    // entry currently has a public key: retire it everywhere first
    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    // private keys are unique, so no other entry can own this one
    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}
621
/// Gives the entry a public key (derived from its request/URI and
/// \p scope) so that other requests can find it.
void
StoreEntry::setPublicKey(const KeyScope scope)
{
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    // update Vary bookkeeping first: it influences the public key value
    adjustVary();
    forcePublicKey(calcPublicKey(scope));
}
651
652 void
653 StoreEntry::clearPublicKeyScope()
654 {
655 if (!key || EBIT_TEST(flags, KEY_PRIVATE))
656 return; // probably the old public key was deleted or made private
657
658 // TODO: adjustVary() when collapsed revalidation supports that
659
660 const cache_key *newKey = calcPublicKey(ksDefault);
661 if (!storeKeyHashCmp(key, newKey))
662 return; // probably another collapsed revalidation beat us to this change
663
664 forcePublicKey(newKey);
665 }
666
/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    // evict whatever entry currently owns this public key
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "Making old " << *e2 << " private.");

        // TODO: check whether there is any sense in keeping old entry
        // shareable here. Leaving it non-shareable for now.
        e2->setPrivateKey(false);
        e2->release(false);
    }

    if (key)
        hashDelete();

    clearPrivate();

    hashInsert(newkey);

    // a disk-resident entry must be re-logged under its new key
    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
692
693 /// Calculates correct public key for feeding forcePublicKey().
694 /// Assumes adjustVary() has been called for this entry already.
695 const cache_key *
696 StoreEntry::calcPublicKey(const KeyScope keyScope)
697 {
698 assert(mem_obj);
699 return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
700 storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
701 }
702
/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
void
StoreEntry::adjustVary()
{
    assert(mem_obj);

    if (!mem_obj->request)
        return;

    HttpRequestPointer request(mem_obj->request);

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release();
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), mem_obj->getReply().getRaw());
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        String vary;
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        /* We are allowed to do this typecast */
        HttpReply *rep = new HttpReply;
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until key is public

        pe->timestampsSet();

        pe->makePublic();

        pe->startWriting(); // after makePublic()

        pe->complete();

        // XXX: stale lock-context name; this code now lives in adjustVary()
        pe->unlock("StoreEntry::forcePublicKey+Vary");
    }
}
774
775 StoreEntry *
776 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
777 {
778 StoreEntry *e = NULL;
779 debugs(20, 3, "storeCreateEntry: '" << url << "'");
780
781 e = new StoreEntry();
782 e->createMemObject(url, log_url, method);
783
784 if (flags.cachable) {
785 EBIT_CLR(e->flags, RELEASE_REQUEST);
786 } else {
787 e->releaseRequest();
788 }
789
790 e->store_status = STORE_PENDING;
791 e->refcount = 0;
792 e->lastref = squid_curtime;
793 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
794 e->ping_status = PING_NONE;
795 EBIT_SET(e->flags, ENTRY_VALIDATED);
796 return e;
797 }
798
799 StoreEntry *
800 storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
801 {
802 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
803 e->lock("storeCreateEntry");
804
805 if (neighbors_do_private_keys || !flags.hierarchical)
806 e->setPrivateKey(false);
807 else
808 e->setPublicKey();
809
810 return e;
811 }
812
/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    // expired as of right now
    expires = squid_curtime;
}
820
/// Buffers response bytes into the MemObject and wakes waiting clients.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReplyPointer reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    // make room in the memory cache before buffering more data
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    // NOTE(review): DELAY_SENDING appears to batch client notifications —
    // handlers run only when that flag is clear; confirm against callers
    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
841
/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    // content offset = stored offset minus header size (write() adds it back)
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
860
861 void
862 StoreEntry::vappendf(const char *fmt, va_list vargs)
863 {
864 LOCAL_ARRAY(char, buf, 4096);
865 *buf = 0;
866 int x;
867
868 #ifdef VA_COPY
869 va_args ap;
870 /* Fix of bug 753r. The value of vargs is undefined
871 * after vsnprintf() returns. Make a copy of vargs
872 * incase we loop around and call vsnprintf() again.
873 */
874 VA_COPY(ap,vargs);
875 errno = 0;
876 if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
877 fatal(xstrerr(errno));
878 return;
879 }
880 va_end(ap);
881 #else /* VA_COPY */
882 errno = 0;
883 if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
884 fatal(xstrerr(errno));
885 return;
886 }
887 #endif /*VA_COPY*/
888
889 if (x < static_cast<int>(sizeof(buf))) {
890 append(buf, x);
891 return;
892 }
893
894 // okay, do it the slow way.
895 char *buf2 = new char[x+1];
896 int y = vsnprintf(buf2, x+1, fmt, vargs);
897 assert(y >= 0 && y == x);
898 append(buf2, y);
899 delete[] buf2;
900 }
901
// deprecated. use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    // printf-style formatting appended to the entry's content
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}
911
// deprecated. use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    // thin wrapper over the member vappendf()
    e->vappendf(fmt, vargs);
}
918
/// Histogram of StoreEntry::checkCachable() outcomes, dumped by
/// storeCheckCachableStats(); each "no" field counts one rejection reason.
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
938
939 int
940 storeTooManyDiskFilesOpen(void)
941 {
942 if (Config.max_open_disk_fds == 0)
943 return 0;
944
945 if (store_open_disk_fd > Config.max_open_disk_fds)
946 return 1;
947
948 return 0;
949 }
950
/// \retval 1 when the object is below the configured minimum object size
/// (special entries are exempt)
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    // for completed objects, judge by the actual stored size...
    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    // ...otherwise by the advertised Content-Length (when known)
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
966
967 bool
968 StoreEntry::checkTooBig() const
969 {
970 if (mem_obj->endOffset() > store_maxobjsize)
971 return true;
972
973 if (getReply()->content_length < 0)
974 return false;
975
976 return (getReply()->content_length > store_maxobjsize);
977 }
978
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decides whether this entry may be cached, updating the
/// store_check_cachable_hist counters; uncachable entries (except
/// already-marked and negative-cached ones) get releaseRequest()ed.
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    // reached only on a "NO" outcome that did not return early
    releaseRequest();
    return 0;
}
1045
/// cache-manager report: dumps the checkCachable() outcome histogram
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1078
void
StoreEntry::lengthWentBad(const char *reason)
{
    // record the length mismatch and prevent further caching/serving
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}
1086
/// Finalizes a fully-received entry: fixes object_sz, flips the status
/// to STORE_OK, validates the advertised length, and wakes clients.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    // verify the advertised length now that the whole object arrived
    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1127
1128 /*
1129 * Someone wants to abort this transfer. Set the reason in the
1130 * request structure, call the callback and mark the
1131 * entry for releasing
1132 */
/// Aborts a STORE_PENDING entry: negatively caches it, marks it for release,
/// notifies the server side (via the registered abort callback) and the
/// client side (via invokeHandlers), and stops any in-progress swapout.
/// The entry is kept locked for the duration of the teardown.
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort");       /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        // deliver the abort notification asynchronously
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort");       /* unlock */
}
1180
1181 /**
1182 * Clear Memory storage to accommodate the given object len
1183 */
1184 void
1185 storeGetMemSpace(int size)
1186 {
1187 PROF_start(storeGetMemSpace);
1188 StoreEntry *e = NULL;
1189 int released = 0;
1190 static time_t last_check = 0;
1191 size_t pages_needed;
1192 RemovalPurgeWalker *walker;
1193
1194 if (squid_curtime == last_check) {
1195 PROF_stop(storeGetMemSpace);
1196 return;
1197 }
1198
1199 last_check = squid_curtime;
1200
1201 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1202
1203 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1204 PROF_stop(storeGetMemSpace);
1205 return;
1206 }
1207
1208 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1209 " pages");
1210
1211 /* XXX what to set as max_scan here? */
1212 walker = mem_policy->PurgeInit(mem_policy, 100000);
1213
1214 while ((e = walker->Next(walker))) {
1215 e->purgeMem();
1216 ++released;
1217
1218 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1219 break;
1220 }
1221
1222 walker->Done(walker);
1223 debugs(20, 3, "storeGetMemSpace stats:");
1224 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1225 debugs(20, 3, " " << std::setw(6) << released << " were released");
1226 PROF_stop(storeGetMemSpace);
1227 }
1228
1229 /* thunk through to Store::Root().maintain(). Note that this would be better still
1230 * if registered against the root store itself, but that requires more complex
1231 * update logic - bigger fish to fry first. Long term each store when
1232 * it becomes active will self register
1233 */
/// Periodic event handler: runs one maintenance pass over the store root and
/// reschedules itself to fire again in one second.
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}
1243
1244 /* The maximum objects to scan for maintain storage space */
1245 #define MAINTAIN_MAX_SCAN 1024
1246 #define MAINTAIN_MAX_REMOVE 64
1247
1248 /* release an object from a cache */
/// Removes this entry from the cache (memory and disk). Locked entries are
/// only marked for later release; entries hit during a disk-store rebuild are
/// deferred to storeLateRelease(). Otherwise the entry is unlinked and
/// destroyed immediately.
/// \param shareable forwarded to releaseRequest()/setPrivateKey()
void
StoreEntry::release(const bool shareable)
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest(shareable);
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && swap_filen > -1) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        Store::Root().memoryUnlink(*this);

        setPrivateKey(shareable);

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        setReleaseFlag();
        // storeLateRelease() will unlock and thereby destroy it after rebuild
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    if (swap_filen > -1 && !EBIT_TEST(flags, KEY_PRIVATE)) {
        // log before unlink() below clears swap_filen
        storeDirSwapLog(this, SWAP_LOG_DEL);
    }

    Store::Root().unlink(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1289
1290 static void
1291 storeLateRelease(void *)
1292 {
1293 StoreEntry *e;
1294 static int n = 0;
1295
1296 if (Store::Controller::store_dirs_rebuilding) {
1297 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1298 return;
1299 }
1300
1301 // TODO: this works but looks unelegant.
1302 for (int i = 0; i < 10; ++i) {
1303 if (LateReleaseStack.empty()) {
1304 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1305 return;
1306 } else {
1307 e = LateReleaseStack.top();
1308 LateReleaseStack.pop();
1309 }
1310
1311 e->unlock("storeLateRelease");
1312 ++n;
1313 }
1314
1315 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1316 }
1317
1318 /* return 1 if a store entry is locked */
1319 int
1320 StoreEntry::locked() const
1321 {
1322 if (lock_count)
1323 return 1;
1324
1325 /*
1326 * SPECIAL, PUBLIC entries should be "locked";
1327 * XXX: Their owner should lock them then instead of relying on this hack.
1328 */
1329 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1330 if (!EBIT_TEST(flags, KEY_PRIVATE))
1331 return 1;
1332
1333 return 0;
1334 }
1335
1336 bool
1337 StoreEntry::validLength() const
1338 {
1339 int64_t diff;
1340 const HttpReply *reply;
1341 assert(mem_obj != NULL);
1342 reply = getReply();
1343 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1344 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1345 objectLen());
1346 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1347 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1348
1349 if (reply->content_length < 0) {
1350 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1351 return 1;
1352 }
1353
1354 if (reply->hdr_sz == 0) {
1355 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1356 return 1;
1357 }
1358
1359 if (mem_obj->method == Http::METHOD_HEAD) {
1360 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1361 return 1;
1362 }
1363
1364 if (reply->sline.status() == Http::scNotModified)
1365 return 1;
1366
1367 if (reply->sline.status() == Http::scNoContent)
1368 return 1;
1369
1370 diff = reply->hdr_sz + reply->content_length - objectLen();
1371
1372 if (diff == 0)
1373 return 1;
1374
1375 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1376
1377 return 0;
1378 }
1379
/// Registers the storage manager's cachemgr reporting actions
/// (storedir, store_io, store_check_cachable_stats).
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1388
/// One-time storage manager initialization: keys, removal policy, digest,
/// store log, late-release event, store root, rebuild, and cachemgr actions.
/// The call order is significant (e.g. rebuild starts after Root().init()).
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1402
/// Applies (re)configured cache size limits to the store root.
void
storeConfigure(void)
{
    Store::Root().updateLimits();
}
1408
1409 bool
1410 StoreEntry::memoryCachable()
1411 {
1412 if (!checkCachable())
1413 return 0;
1414
1415 if (mem_obj == NULL)
1416 return 0;
1417
1418 if (mem_obj->data_hdr.size() == 0)
1419 return 0;
1420
1421 if (mem_obj->inmem_lo != 0)
1422 return 0;
1423
1424 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1425 return 0;
1426
1427 return 1;
1428 }
1429
1430 int
1431 StoreEntry::checkNegativeHit() const
1432 {
1433 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1434 return 0;
1435
1436 if (expires <= squid_curtime)
1437 return 0;
1438
1439 if (store_status != STORE_OK)
1440 return 0;
1441
1442 return 1;
1443 }
1444
1445 /**
1446 * Set object for negative caching.
1447 * Preserves any expiry information given by the server.
1448 * In absence of proper expiry info it will set to expire immediately,
1449 * or with HTTP-violations enabled the configured negative-TTL is observed
1450 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    // NB: the preprocessor branch below is the single body of this `if`
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    // flag is set unconditionally; only the expiry source differs
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1464
/// Shutdown/cleanup helper: frees store root memory and the cache digest.
void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    // reset the pointer even when digests are compiled out
    store_digest = NULL;
}
1474
1475 int
1476 expiresMoreThan(time_t expires, time_t when)
1477 {
1478 if (expires < 0) /* No Expires given */
1479 return 1;
1480
1481 return (expires > (squid_curtime + when));
1482 }
1483
/// Whether this entry may be served to a client: not release-requested, not a
/// stale negative-cache entry, not aborted, and backed by disk, an ongoing
/// swapout, or an un-nibbled in-memory copy.
int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // negatively cached entries are only valid until they expire
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing, that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
1523
/// Computes this entry's timestamp, expiry, and last-modified time from the
/// current reply, compensating for clock skew and network delays.
/// \returns false when nothing changed (timestamp, expiry, and LMT all match)
bool
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime) &&
                responseTime.tv_sec < squid_curtime)
            served_date -= (squid_curtime - responseTime.tv_sec);
    }

    // expiry relative to the (possibly corrected) served date
    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    return true;
}
1584
/// Registers the server-side abort callback invoked from StoreEntry::abort().
/// Only one callback may be registered at a time.
/// \param cb the callback function
/// \param data opaque cbdata pointer passed to the callback (ref-counted here)
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}
1593
/// Clears any registered abort callback and drops its cbdata reference.
void
StoreEntry::unregisterAbort()
{
    assert(mem_obj);
    if (mem_obj->abort.callback) {
        mem_obj->abort.callback = NULL;
        cbdataReferenceDone(mem_obj->abort.data);
    }
}
1603
/// Dumps all StoreEntry fields to the debug log.
/// \param l the debug level at which the lines are emitted
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1625
1626 /*
1627 * NOTE, this function assumes only two mem states
1628 */
/// Changes the entry's memory status and keeps the local memory replacement
/// policy and hot-object counter in sync. With a shared memory cache there is
/// no local policy to update, so only the status field changes.
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // ENTRY_SPECIAL entries are never tracked by the removal policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1670
1671 const char *
1672 StoreEntry::url() const
1673 {
1674 if (mem_obj == NULL)
1675 return "[null_mem_obj]";
1676 else
1677 return mem_obj->storeId();
1678 }
1679
/// Creates this entry's MemObject without setting its URIs;
/// the entry must not already have one.
void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}
1686
/// Creates this entry's MemObject and sets its store/log URIs and method;
/// the entry must not already have one.
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}
1693
/// Creates this entry's MemObject if needed, then (re)sets its
/// store/log URIs and request method.
void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1701
1702 /** disable sending content to the clients.
1703 *
1704 * This just sets DELAY_SENDING.
1705 */
/// Suspends sending content to clients by setting DELAY_SENDING;
/// paired with flush(), which clears the flag and resumes delivery.
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1711
1712 /** flush any buffered content.
1713 *
1714 * This just clears DELAY_SENDING and Invokes the handlers
1715 * to begin sending anything that may be buffered.
1716 */
1717 void
1718 StoreEntry::flush()
1719 {
1720 if (EBIT_TEST(flags, DELAY_SENDING)) {
1721 EBIT_CLR(flags, DELAY_SENDING);
1722 invokeHandlers();
1723 }
1724 }
1725
/// Total stored object size in bytes, headers included (MemObject::object_sz).
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1732
/// Stored body size in bytes: objectLen() minus the reply header size.
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1740
/// The stored HTTP reply, or nullptr when the entry has no MemObject.
HttpReply const *
StoreEntry::getReply() const
{
    return (mem_obj ? mem_obj->getReply().getRaw() : nullptr);
}
1746
/// Resets the entry's MemObject state and clears its timing metadata
/// (expires, last-modified, timestamp) back to the unset value (-1).
void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, url());
    mem_obj->reset();
    expires = lastModified_ = timestamp = -1;
}
1755
1756 /*
1757 * storeFsInit
1758 *
1759 * This routine calls the SETUP routine for each fs type.
1760 * I don't know where the best place for this is, and I'm not going to shuffle
1761 * around large chunks of code right now (that can be done once its working.)
1762 */
void
storeFsInit(void)
{
    // runs each replacement-policy module's SETUP hook
    storeReplSetup();
}
1768
1769 /*
1770 * called to add another store removal policy module
1771 */
/// Registers a store removal policy module under `type`. The registry is a
/// NULL-terminated array that is grown by one slot per registration;
/// duplicate registrations are rejected with a warning.
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    // i+2 slots: one for the new entry plus a zeroed terminator entry
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
1794
1795 /*
1796 * Create a removal policy instance
1797 */
1798 RemovalPolicy *
1799 createRemovalPolicy(RemovalPolicySettings * settings)
1800 {
1801 storerepl_entry_t *r;
1802
1803 for (r = storerepl_list; r && r->typestr; ++r) {
1804 if (strcmp(r->typestr, settings->type) == 0)
1805 return r->create(settings->args);
1806 }
1807
1808 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1809 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1810 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1811 fatalf("ERROR: Unknown policy %s\n", settings->type);
1812 return NULL; /* NOTREACHED */
1813 }
1814
#if 0
/* Dead code (disabled): legacy swap-file-number bookkeeping kept for
 * reference only; swap_file_number management moved elsewhere. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1835
/// Stores an error reply in this entry, completes it, negatively caches it,
/// and marks it for release. Takes ownership of `reply` via replaceHttpReply.
void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest();
    unlock("StoreEntry::storeErrorResponse");
}
1848
1849 /*
1850 * Replace a store entry with
1851 * a new reply. This eats the reply.
1852 */
/// Replaces this entry's stored reply with `rep` (takes ownership).
/// \param andStartWriting when true, also packs the new headers into the store
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceReply(HttpReplyPointer(rep));

    if (andStartWriting)
        startWriting();
}
1868
/// Packs the current reply's headers (and any reply body prefix) into this
/// still-empty entry and flushes the result to waiting clients.
void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    // buffer() .. flush() brackets the writes so clients see a complete prefix
    buffer();
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
    flush();
}
1890
/// Builds and packs this entry's swap metadata TLV list.
/// Also records the packed size in mem_obj->swap_hdr_sz.
/// \returns a newly allocated buffer (caller becomes responsible for it)
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1902
1903 /**
1904 * Abandon the transient entry our worker has created if neither the shared
1905 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1906 * any, should notice and use Plan B instead of getting stuck waiting for us
1907 * to start swapping the entry out.
1908 */
void
StoreEntry::transientsAbandonmentCheck()
{
    // abandon only when all four conditions hold: we own the entry, other
    // workers are waiting on it, and both caches have rejected it
    if (mem_obj && !mem_obj->smpCollapsed &&     // this worker is responsible
            mem_obj->xitTable.index >= 0 &&      // other workers may be interested
            mem_obj->memCache.index < 0 &&       // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}
1921
/// Called when the memory-caching decision is made; the decision value itself
/// is unused here — we only re-check whether the transient entry must be
/// abandoned.
void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}
1927
/// Records the swapout decision and re-checks transient-entry abandonment.
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}
1936
/// Releases in-memory object data that is no longer needed.
/// \param preserveSwappable keep data that may still be swapped out to disk
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
1958
1959 bool
1960 StoreEntry::modifiedSince(const time_t ims, const int imslen) const
1961 {
1962 int object_length;
1963 const time_t mod_time = lastModified();
1964
1965 debugs(88, 3, "modifiedSince: '" << url() << "'");
1966
1967 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1968
1969 if (mod_time < 0)
1970 return true;
1971
1972 /* Find size of the object */
1973 object_length = getReply()->content_length;
1974
1975 if (object_length < 0)
1976 object_length = contentLen();
1977
1978 if (mod_time > ims) {
1979 debugs(88, 3, "--> YES: entry newer than client");
1980 return true;
1981 } else if (mod_time < ims) {
1982 debugs(88, 3, "--> NO: entry older than client");
1983 return false;
1984 } else if (imslen < 0) {
1985 debugs(88, 3, "--> NO: same LMT, no client length");
1986 return false;
1987 } else if (imslen == object_length) {
1988 debugs(88, 3, "--> NO: same LMT, same length");
1989 return false;
1990 } else {
1991 debugs(88, 3, "--> YES: same LMT, different length");
1992 return true;
1993 }
1994 }
1995
1996 bool
1997 StoreEntry::hasEtag(ETag &etag) const
1998 {
1999 if (const HttpReply *reply = getReply()) {
2000 etag = reply->header.getETag(Http::HdrType::ETAG);
2001 if (etag.str)
2002 return true;
2003 }
2004 return false;
2005 }
2006
/// Whether any ETag in the request's If-Match header matches this entry's
/// ETag (strong comparison only, per If-Match semantics).
bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}
2013
/// Whether any ETag in the request's If-None-Match header matches this
/// entry's ETag.
bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}
2023
2024 /// whether at least one of the request ETags matches entity ETag
/// whether at least one of the request ETags matches entity ETag
/// \param reqETags comma-separated ETag list from the request header
/// \param allowWeakMatch permit weak ETag comparison (else strong only)
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
    if (!repETag.str) {
        // no entity ETag: only a wildcard request member can match
        static SBuf asterisk("*", 1);
        return strListIsMember(&reqETags, asterisk, ',');
    }

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // iterate the comma-separated request list until a match is found
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
2053
/// The configured cache_dir (disk store) this entry is stored on.
/// Asserts that swap_dirn refers to a valid, configured cache_dir.
Store::Disk &
StoreEntry::disk() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}
2062
2063 /*
2064 * return true if the entry is in a state where
2065 * it can accept more data (ie with write() method)
2066 */
2067 bool
2068 StoreEntry::isAccepting() const
2069 {
2070 if (STORE_PENDING != store_status)
2071 return false;
2072
2073 if (EBIT_TEST(flags, ENTRY_ABORTED))
2074 return false;
2075
2076 return true;
2077 }
2078
/// Formats the entry's four timestamps (validated, last-used, last-modified,
/// expires) into a static buffer for logging.
/// \returns a pointer to a LOCAL_ARRAY buffer overwritten on each call
const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}
2090
2091 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2092 {
2093 os << "e:";
2094
2095 if (e.mem_obj) {
2096 if (e.mem_obj->xitTable.index > -1)
2097 os << 't' << e.mem_obj->xitTable.index;
2098 if (e.mem_obj->memCache.index > -1)
2099 os << 'm' << e.mem_obj->memCache.index;
2100 }
2101 if (e.swap_filen > -1 || e.swap_dirn > -1)
2102 os << 'd' << e.swap_filen << '@' << e.swap_dirn;
2103
2104 os << '=';
2105
2106 // print only non-default status values, using unique letters
2107 if (e.mem_status != NOT_IN_MEMORY ||
2108 e.store_status != STORE_PENDING ||
2109 e.swap_status != SWAPOUT_NONE ||
2110 e.ping_status != PING_NONE) {
2111 if (e.mem_status != NOT_IN_MEMORY) os << 'm';
2112 if (e.store_status != STORE_PENDING) os << 's';
2113 if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
2114 if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
2115 }
2116
2117 // print only set flags, using unique letters
2118 if (e.flags) {
2119 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
2120 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
2121 if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
2122 if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
2123 if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
2124 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
2125 if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
2126 os << 'I';
2127 if (e.shareableWhenPrivate)
2128 os << 'H';
2129 }
2130 if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
2131 if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
2132 if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
2133 if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
2134 if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
2135 if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
2136 }
2137
2138 if (e.mem_obj && e.mem_obj->smpCollapsed)
2139 os << 'O';
2140
2141 return os << '/' << &e << '*' << e.locks();
2142 }
2143
2144 /* NullStoreEntry */
2145
2146 NullStoreEntry NullStoreEntry::_instance;
2147
/// Access to the process-wide NullStoreEntry singleton.
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2153
/// A null entry has no cache key; returns a fixed placeholder string.
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2159
/// The singleton must never be deleted; deleting it is a fatal error.
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2165
/// A null entry has no swap metadata to serialise.
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}
2171