1 /*
2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "CacheDigest.h"
13 #include "CacheManager.h"
14 #include "comm/Connection.h"
15 #include "comm/Read.h"
16 #include "ETag.h"
17 #include "event.h"
18 #include "fde.h"
19 #include "globals.h"
20 #include "http.h"
21 #include "HttpReply.h"
22 #include "HttpRequest.h"
23 #include "mem_node.h"
24 #include "MemObject.h"
25 #include "mgr/Registration.h"
26 #include "mgr/StoreIoAction.h"
27 #include "profiler/Profiler.h"
28 #include "repl_modules.h"
29 #include "RequestFlags.h"
30 #include "SquidConfig.h"
31 #include "SquidTime.h"
32 #include "StatCounters.h"
33 #include "stmem.h"
34 #include "Store.h"
35 #include "store/Controller.h"
36 #include "store/Disk.h"
37 #include "store/Disks.h"
38 #include "store_digest.h"
39 #include "store_key_md5.h"
40 #include "store_log.h"
41 #include "store_rebuild.h"
42 #include "StoreClient.h"
43 #include "StoreIOState.h"
44 #include "StoreMeta.h"
45 #include "StrList.h"
46 #include "swap_log_op.h"
47 #include "tools.h"
48 #if USE_DELAY_POOLS
49 #include "DelayPools.h"
50 #endif
51
52 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
53 * XXX: convert to MEMPROXY_CLASS() API
54 */
55 #include "mem/Pool.h"
56
57 #include <climits>
58 #include <stack>
59
60 #define REBUILD_TIMESTAMP_DELTA_MAX 2
61
62 #define STORE_IN_MEM_BUCKETS (229)
63
64 /** \todo Convert these string constants to generated enum string-arrays */
65
66 const char *memStatusStr[] = {
67 "NOT_IN_MEMORY",
68 "IN_MEMORY"
69 };
70
71 const char *pingStatusStr[] = {
72 "PING_NONE",
73 "PING_WAITING",
74 "PING_DONE"
75 };
76
77 const char *storeStatusStr[] = {
78 "STORE_OK",
79 "STORE_PENDING"
80 };
81
82 const char *swapStatusStr[] = {
83 "SWAPOUT_NONE",
84 "SWAPOUT_WRITING",
85 "SWAPOUT_DONE"
86 };
87
88 /*
89 * This defines a repl type
90 */
91
92 typedef struct _storerepl_entry storerepl_entry_t;
93
94 struct _storerepl_entry {
95 const char *typestr;
96 REMOVALPOLICYCREATE *create;
97 };
98
99 static storerepl_entry_t *storerepl_list = NULL;
100
101 /*
102 * local function prototypes
103 */
104 static int getKeyCounter(void);
105 static OBJH storeCheckCachableStats;
106 static EVH storeLateRelease;
107
108 /*
109 * local variables
110 */
111 static std::stack<StoreEntry*> LateReleaseStack;
112 MemAllocator *StoreEntry::pool = NULL;
113
114 void
115 Store::Stats(StoreEntry * output)
116 {
117 assert(output);
118 Root().stat(*output);
119 }
120
121 // XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
122 // definitions but doing so exposes bug 4370, and maybe 4354 and 4355
123 void *
124 StoreEntry::operator new (size_t bytecount)
125 {
126 assert(bytecount == sizeof (StoreEntry));
127
128 if (!pool) {
129 pool = memPoolCreate ("StoreEntry", bytecount);
130 }
131
132 return pool->alloc();
133 }
134
135 void
136 StoreEntry::operator delete (void *address)
137 {
138 pool->freeOne(address);
139 }
140
141 void
142 StoreEntry::makePublic(const KeyScope scope)
143 {
144 /* This object can be cached for a long time */
145 if (!EBIT_TEST(flags, RELEASE_REQUEST))
146 setPublicKey(scope);
147 }
148
149 void
150 StoreEntry::makePrivate()
151 {
152 /* This object should never be cached at all */
153 expireNow();
154 releaseRequest(); /* delete object when not used */
155 }
156
157 void
158 StoreEntry::cacheNegatively()
159 {
160 /* This object may be negatively cached */
161 negativeCache();
162 makePublic();
163 }
164
165 size_t
166 StoreEntry::inUseCount()
167 {
168 if (!pool)
169 return 0;
170 return pool->getInUseCount();
171 }
172
173 const char *
174 StoreEntry::getMD5Text() const
175 {
176 return storeKeyText((const cache_key *)key);
177 }
178
179 #include "comm.h"
180
181 void
182 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
183 {
184 StoreEntry *anEntry = (StoreEntry *)theContext;
185 anEntry->delayAwareRead(aRead.conn,
186 aRead.buf,
187 aRead.len,
188 aRead.callback);
189 }
190
191 void
192 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
193 {
194 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
195 /* sketch: readdeferer* = getdeferer.
196 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
197 */
198
199 if (amountToRead <= 0) {
200 assert (mem_obj);
201 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
202 return;
203 }
204
205 if (fd_table[conn->fd].closing()) {
206 // Readers must have closing callbacks if they want to be notified. No
207 // readers appeared to care around 2009/12/14 as they skipped reading
208 // for other reasons. Closing may already be true at the delayAwareRead
209 // call time or may happen while we wait after delayRead() above.
210 debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
211 callback);
212 return; // the read callback will never be called
213 }
214
215 comm_read(conn, buf, amountToRead, callback);
216 }
217
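/// Upper bound on how much this entry may read from the network right now:
/// zero when the read-ahead gap is full; otherwise capped by delay pools
/// (unless ignoreDelayPools). With no MemObject, the full aRange.end is allowed.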
218 size_t
219 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
220 {
221 if (mem_obj == NULL)
222 return aRange.end;
223
224 #if URL_CHECKSUM_DEBUG
225
226 mem_obj->checkUrlChecksum();
227
228 #endif
229
230 if (!mem_obj->readAheadPolicyCanRead())
231 return 0;
232
233 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
234 }
235
236 bool
237 StoreEntry::checkDeferRead(int) const
238 {
239 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
240 }
241
242 void
243 StoreEntry::setNoDelay(bool const newValue)
244 {
245 if (mem_obj)
246 mem_obj->setNoDelay(newValue);
247 }
248
249 // XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
250 // open swapin file, aggressively trim memory, and ignore read-ahead gap.
251 // It does not mean we will read from disk exclusively (or at all!).
252 // XXX: May create STORE_DISK_CLIENT with no disk caching configured.
253 // XXX: Collapsed clients cannot predict their type.
254 store_client_t
255 StoreEntry::storeClientType() const
256 {
257 /* The needed offset isn't in memory
258 * XXX TODO: this is wrong for range requests
259 * as the needed offset may *not* be 0, AND
260 * offset 0 in the memory object is the HTTP headers.
261 */
262
263 assert(mem_obj);
264
265 if (mem_obj->inmem_lo)
266 return STORE_DISK_CLIENT;
267
268 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
269 /* I don't think we should be adding clients to aborted entries */
270 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
271 return STORE_MEM_CLIENT;
272 }
273
274 if (store_status == STORE_OK) {
275 /* the object has completed. */
276
277 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
278 if (swap_status == SWAPOUT_DONE) {
279 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
280 if (mem_obj->endOffset() == mem_obj->object_sz) {
281 /* hot object fully swapped in (XXX: or swapped out?) */
282 return STORE_MEM_CLIENT;
283 }
284 } else {
285 /* Memory-only, or currently being swapped out */
286 return STORE_MEM_CLIENT;
287 }
288 }
289 return STORE_DISK_CLIENT;
290 }
291
292 /* here and past, entry is STORE_PENDING */
293 /*
294 * If this is the first client, let it be the mem client
295 */
296 if (mem_obj->nclients == 1)
297 return STORE_MEM_CLIENT;
298
299 /*
300 * If there is no disk file to open yet, we must make this a
301 * mem client. If we can't open the swapin file before writing
302 * to the client, there is no guarantee that we will be able
303 * to open it later when we really need it.
304 */
305 if (swap_status == SWAPOUT_NONE)
306 return STORE_MEM_CLIENT;
307
308 /*
309 * otherwise, make subsequent clients read from disk so they
310 * cannot delay the first, and vice versa.
311 */
312 return STORE_DISK_CLIENT;
313 }
314
315 StoreEntry::StoreEntry() :
316 mem_obj(NULL),
317 timestamp(-1),
318 lastref(-1),
319 expires(-1),
320 lastModified_(-1),
321 swap_file_sz(0),
322 refcount(0),
323 flags(0),
324 swap_filen(-1),
325 swap_dirn(-1),
326 mem_status(NOT_IN_MEMORY),
327 ping_status(PING_NONE),
328 store_status(STORE_PENDING),
329 swap_status(SWAPOUT_NONE),
330 lock_count(0)
331 {
332 debugs(20, 5, "StoreEntry constructed, this=" << this);
333 }
334
335 StoreEntry::~StoreEntry()
336 {
337 debugs(20, 5, "StoreEntry destructed, this=" << this);
338 }
339
340 #if USE_ADAPTATION
341 void
342 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
343 {
344 if (!deferredProducer)
345 deferredProducer = producer;
346 else
347 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
348 *deferredProducer << ", requested call: " << *producer);
349 }
350
351 void
352 StoreEntry::kickProducer()
353 {
354 if (deferredProducer != NULL) {
355 ScheduleCallHere(deferredProducer);
356 deferredProducer = NULL;
357 }
358 }
359 #endif
360
361 void
362 StoreEntry::destroyMemObject()
363 {
364 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
365
366 if (MemObject *mem = mem_obj) {
367 // Store::Root() is FATALly missing during shutdown
368 if (mem->xitTable.index >= 0 && !shutting_down)
369 Store::Root().transientsDisconnect(*mem);
370 if (mem->memCache.index >= 0 && !shutting_down)
371 Store::Root().memoryDisconnect(*this);
372
373 setMemStatus(NOT_IN_MEMORY);
374 mem_obj = NULL;
375 delete mem;
376 }
377 }
378
379 void
380 destroyStoreEntry(void *data)
381 {
382 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
383 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
384 assert(e != NULL);
385
386 if (e == NullStoreEntry::getInstance())
387 return;
388
389 // Store::Root() is FATALly missing during shutdown
390 if (e->swap_filen >= 0 && !shutting_down)
391 e->disk().disconnect(*e);
392
393 e->destroyMemObject();
394
395 e->hashDelete();
396
397 assert(e->key == NULL);
398
399 delete e;
400 }
401
402 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
403
404 void
405 StoreEntry::hashInsert(const cache_key * someKey)
406 {
407 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
408 key = storeKeyDup(someKey);
409 hash_join(store_table, this);
410 }
411
412 void
413 StoreEntry::hashDelete()
414 {
415 if (key) { // some test cases do not create keys and do not hashInsert()
416 hash_remove_link(store_table, this);
417 storeKeyFree((const cache_key *)key);
418 key = NULL;
419 }
420 }
421
422 /* -------------------------------------------------------------------------- */
423
424 /* get rid of memory copy of the object */
425 void
426 StoreEntry::purgeMem()
427 {
428 if (mem_obj == NULL)
429 return;
430
431 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
432
433 Store::Root().memoryUnlink(*this);
434
435 if (swap_status != SWAPOUT_DONE)
436 release();
437 }
438
439 void
440 StoreEntry::lock(const char *context)
441 {
442 ++lock_count;
443 debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
444 }
445
446 void
447 StoreEntry::touch()
448 {
449 lastref = squid_curtime;
450 }
451
452 void
453 StoreEntry::setReleaseFlag()
454 {
455 if (EBIT_TEST(flags, RELEASE_REQUEST))
456 return;
457
458 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
459
460 EBIT_SET(flags, RELEASE_REQUEST);
461
462 Store::Root().markForUnlink(*this);
463 }
464
465 void
466 StoreEntry::releaseRequest()
467 {
468 if (EBIT_TEST(flags, RELEASE_REQUEST))
469 return;
470
471 setReleaseFlag(); // makes validToSend() false, preventing future hits
472
473 setPrivateKey();
474 }
475
476 int
477 StoreEntry::unlock(const char *context)
478 {
479 debugs(20, 3, (context ? context : "somebody") <<
480 " unlocking key " << getMD5Text() << ' ' << *this);
481 assert(lock_count > 0);
482 --lock_count;
483
484 if (lock_count)
485 return (int) lock_count;
486
487 if (store_status == STORE_PENDING)
488 setReleaseFlag();
489
490 assert(storePendingNClients(this) == 0);
491
492 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
493 this->release();
494 return 0;
495 }
496
497 if (EBIT_TEST(flags, KEY_PRIVATE))
498 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
499
500 Store::Root().handleIdleEntry(*this); // may delete us
501 return 0;
502 }
503
504 void
505 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
506 {
507 assert (aClient);
508 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
509
510 if (!result)
511 aClient->created (NullStoreEntry::getInstance());
512 else
513 aClient->created (result);
514 }
515
516 void
517 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
518 {
519 assert (aClient);
520 StoreEntry *result = storeGetPublicByRequest (request);
521
522 if (!result)
523 result = NullStoreEntry::getInstance();
524
525 aClient->created (result);
526 }
527
528 void
529 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
530 {
531 assert (aClient);
532 StoreEntry *result = storeGetPublic (uri, method);
533
534 if (!result)
535 result = NullStoreEntry::getInstance();
536
537 aClient->created (result);
538 }
539
540 StoreEntry *
541 storeGetPublic(const char *uri, const HttpRequestMethod& method)
542 {
543 return Store::Root().get(storeKeyPublic(uri, method));
544 }
545
546 StoreEntry *
547 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
548 {
549 return Store::Root().get(storeKeyPublicByRequestMethod(req, method, keyScope));
550 }
551
552 StoreEntry *
553 storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
554 {
555 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);
556
557 if (e == NULL && req->method == Http::METHOD_HEAD)
558 /* We can generate a HEAD reply from a cached GET object */
559 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);
560
561 return e;
562 }
563
564 static int
565 getKeyCounter(void)
566 {
567 static int key_counter = 0;
568
569 if (++key_counter < 0)
570 key_counter = 1;
571
572 return key_counter;
573 }
574
575 /* RBC 20050104 AFAICT this should become simpler:
576 * rather than reinserting with a special key it should be marked
577 * as 'released' and then cleaned up when refcounting indicates.
578 * the StoreHashIndex could well implement its 'released' in the
579 * current manner.
580 * Also, clean log writing should skip over it.
581 * Otherwise, we need a 'remove from the index but not the store
582 * concept'.
583 */
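/// Gives the entry a fresh private key, removing any public key first so
/// that future lookups can no longer hit this entry.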
584 void
585 StoreEntry::setPrivateKey()
586 {
587 if (key && EBIT_TEST(flags, KEY_PRIVATE))
588 return; /* is already private */
589
590 if (key) {
591 setReleaseFlag(); // will markForUnlink(); all caches/workers will know
592
593 // TODO: move into SwapDir::markForUnlink() already called by Root()
594 if (swap_filen > -1)
595 storeDirSwapLog(this, SWAP_LOG_DEL);
596
597 hashDelete();
598 }
599
600 if (mem_obj && mem_obj->hasUris())
601 mem_obj->id = getKeyCounter();
602 const cache_key *newkey = storeKeyPrivate();
603
604 assert(hash_lookup(store_table, newkey) == NULL);
605 EBIT_SET(flags, KEY_PRIVATE);
606 hashInsert(newkey);
607 }
608
609 void
610 StoreEntry::setPublicKey(const KeyScope scope)
611 {
612 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
613 return; /* is already public */
614
615 assert(mem_obj);
616
617 /*
618 * We can't make RELEASE_REQUEST objects public. Depending on
619 * when RELEASE_REQUEST gets set, we might not be swapping out
620 * the object. If we're not swapping out, then subsequent
621 * store clients won't be able to access object data which has
622 * been freed from memory.
623 *
624 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
625 */
626 #if MORE_DEBUG_OUTPUT
627
628 if (EBIT_TEST(flags, RELEASE_REQUEST))
629 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
630
631 #endif
632
633 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
634
635 adjustVary();
636 forcePublicKey(calcPublicKey(scope));
637 }
638
639 void
640 StoreEntry::clearPublicKeyScope()
641 {
642 if (!key || EBIT_TEST(flags, KEY_PRIVATE))
643 return; // probably the old public key was deleted or made private
644
645 // TODO: adjustVary() when collapsed revalidation supports that
646
647 const cache_key *newKey = calcPublicKey(ksDefault);
648 if (!storeKeyHashCmp(key, newKey))
649 return; // probably another collapsed revalidation beat us to this change
650
651 forcePublicKey(newKey);
652 }
653
654 /// Unconditionally sets public key for this store entry.
655 /// Releases the old entry with the same public key (if any).
656 void
657 StoreEntry::forcePublicKey(const cache_key *newkey)
658 {
659 if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
660 assert(e2 != this);
661 debugs(20, 3, "Making old " << *e2 << " private.");
662 e2->setPrivateKey();
663 e2->release();
664 }
665
666 if (key)
667 hashDelete();
668
669 EBIT_CLR(flags, KEY_PRIVATE);
670
671 hashInsert(newkey);
672
673 if (swap_filen > -1)
674 storeDirSwapLog(this, SWAP_LOG_ADD);
675 }
676
677 /// Calculates correct public key for feeding forcePublicKey().
678 /// Assumes adjustVary() has been called for this entry already.
679 const cache_key *
680 StoreEntry::calcPublicKey(const KeyScope keyScope)
681 {
682 assert(mem_obj);
683 return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
684 storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
685 }
686
687 /// Updates mem_obj->request->vary_headers to reflect the current Vary.
688 /// The vary_headers field is used to calculate the Vary marker key.
689 /// Releases the old Vary marker with an outdated key (if any).
690 void
691 StoreEntry::adjustVary()
692 {
693 assert(mem_obj);
694
695 if (!mem_obj->request)
696 return;
697
698 HttpRequestPointer request(mem_obj->request);
699
700 if (mem_obj->vary_headers.isEmpty()) {
701 /* First handle the case where the object no longer varies */
702 request->vary_headers.clear();
703 } else {
704 if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
705 /* Oops.. the variance has changed. Kill the base object
706 * to record the new variance key
707 */
708 request->vary_headers.clear(); /* free old "bad" variance key */
709 if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
710 pe->release();
711 }
712
713 /* Make sure the request knows the variance status */
714 if (request->vary_headers.isEmpty())
715 request->vary_headers = httpMakeVaryMark(request.getRaw(), mem_obj->getReply().getRaw());
716 }
717
718 // TODO: storeGetPublic() calls below may create unlocked entries.
719 // We should add/use storeHas() API or lock/unlock those entries.
720 if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
721 /* Create "vary" base object */
722 String vary;
723 StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
724 /* We are allowed to do this typecast */
725 HttpReply *rep = new HttpReply;
726 rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
727 vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);
728
729 if (vary.size()) {
730 /* Again, we own this structure layout */
731 rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
732 vary.clean();
733 }
734
735 #if X_ACCELERATOR_VARY
736 vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);
737
738 if (vary.size() > 0) {
739 /* Again, we own this structure layout */
740 rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
741 vary.clean();
742 }
743
744 #endif
745 pe->replaceHttpReply(rep, false); // no write until key is public
746
747 pe->timestampsSet();
748
749 pe->makePublic();
750
751 pe->startWriting(); // after makePublic()
752
753 pe->complete();
754
755 pe->unlock("StoreEntry::forcePublicKey+Vary");
756 }
757 }
758
759 StoreEntry *
760 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
761 {
762 StoreEntry *e = NULL;
763 debugs(20, 3, "storeCreateEntry: '" << url << "'");
764
765 e = new StoreEntry();
766 e->makeMemObject();
767 e->mem_obj->setUris(url, log_url, method);
768
769 if (flags.cachable) {
770 EBIT_CLR(e->flags, RELEASE_REQUEST);
771 } else {
772 e->releaseRequest();
773 }
774
775 e->store_status = STORE_PENDING;
776 e->refcount = 0;
777 e->lastref = squid_curtime;
778 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
779 e->ping_status = PING_NONE;
780 EBIT_SET(e->flags, ENTRY_VALIDATED);
781 return e;
782 }
783
784 StoreEntry *
785 storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
786 {
787 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
788 e->lock("storeCreateEntry");
789
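/* Entries normally start with a private key; a public key is used right
 * away only for hierarchical requests to neighbors that cannot handle
 * private keys. */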
790 if (neighbors_do_private_keys || !flags.hierarchical)
791 e->setPrivateKey();
792 else
793 e->setPublicKey();
794
795 return e;
796 }
797
798 /* Mark object as expired */
799 void
800 StoreEntry::expireNow()
801 {
802 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
803 expires = squid_curtime;
804 }
805
806 void
807 StoreEntry::write (StoreIOBuffer writeBuffer)
808 {
809 assert(mem_obj != NULL);
810 /* This assert will change when we teach the store to update */
811 PROF_start(StoreEntry_write);
812 assert(store_status == STORE_PENDING);
813
814 // XXX: caller uses content offset, but we also store headers
815 if (const HttpReplyPointer reply = mem_obj->getReply())
816 writeBuffer.offset += reply->hdr_sz;
817
818 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
819 PROF_stop(StoreEntry_write);
820 storeGetMemSpace(writeBuffer.length);
821 mem_obj->write(writeBuffer);
822
823 if (!EBIT_TEST(flags, DELAY_SENDING))
824 invokeHandlers();
825 }
826
827 /* Append incoming data from a primary server to an entry. */
828 void
829 StoreEntry::append(char const *buf, int len)
830 {
831 assert(mem_obj != NULL);
832 assert(len >= 0);
833 assert(store_status == STORE_PENDING);
834
835 StoreIOBuffer tempBuffer;
836 tempBuffer.data = (char *)buf;
837 tempBuffer.length = len;
838 /*
839 * XXX sigh, offset might be < 0 here, but it gets "corrected"
840 * later. This offset crap is such a mess.
841 */
842 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
843 write(tempBuffer);
844 }
845
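// Formats into a 4KB stack buffer first; if vsnprintf() reports truncation,
// retries once into an exactly-sized heap buffer.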
846 void
847 StoreEntry::vappendf(const char *fmt, va_list vargs)
848 {
849 LOCAL_ARRAY(char, buf, 4096);
850 *buf = 0;
851 int x;
852
853 #ifdef VA_COPY
854 va_list ap;
855 /* Fix of bug 753r. The value of vargs is undefined
856 * after vsnprintf() returns. Make a copy of vargs
857 * in case we loop around and call vsnprintf() again.
858 */
859 VA_COPY(ap,vargs);
860 errno = 0;
861 if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
862 fatal(xstrerr(errno));
863 return;
864 }
865 va_end(ap);
866 #else /* VA_COPY */
867 errno = 0;
868 if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
869 fatal(xstrerr(errno));
870 return;
871 }
872 #endif /*VA_COPY*/
873
874 if (x < static_cast<int>(sizeof(buf))) {
875 append(buf, x);
876 return;
877 }
878
879 // okay, do it the slow way.
880 char *buf2 = new char[x+1];
881 int y = vsnprintf(buf2, x+1, fmt, vargs);
882 assert(y >= 0 && y == x);
883 append(buf2, y);
884 delete[] buf2;
885 }
886
887 // deprecated. use StoreEntry::appendf() instead.
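// Usage sketch (cf. storeCheckCachableStats() below):
//     storeAppendPrintf(sentry, "no.too_big\t%d\n", store_check_cachable_hist.no.too_big);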
888 void
889 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
890 {
891 va_list args;
892 va_start(args, fmt);
893 e->vappendf(fmt, args);
894 va_end(args);
895 }
896
897 // deprecated. use StoreEntry::appendf() instead.
898 void
899 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
900 {
901 e->vappendf(fmt, vargs);
902 }
903
904 struct _store_check_cachable_hist {
905
906 struct {
907 int non_get;
908 int not_entry_cachable;
909 int wrong_content_length;
910 int negative_cached;
911 int too_big;
912 int too_small;
913 int private_key;
914 int too_many_open_files;
915 int too_many_open_fds;
916 int missing_parts;
917 } no;
918
919 struct {
920 int Default;
921 } yes;
922 } store_check_cachable_hist;
923
924 int
925 storeTooManyDiskFilesOpen(void)
926 {
927 if (Config.max_open_disk_fds == 0)
928 return 0;
929
930 if (store_open_disk_fd > Config.max_open_disk_fds)
931 return 1;
932
933 return 0;
934 }
935
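/// whether the (known or announced) object size is below minimum_object_size;
/// ENTRY_SPECIAL objects are exempt from this check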
936 int
937 StoreEntry::checkTooSmall()
938 {
939 if (EBIT_TEST(flags, ENTRY_SPECIAL))
940 return 0;
941
942 if (STORE_OK == store_status)
943 if (mem_obj->object_sz >= 0 &&
944 mem_obj->object_sz < Config.Store.minObjectSize)
945 return 1;
946 if (getReply()->content_length > -1)
947 if (getReply()->content_length < Config.Store.minObjectSize)
948 return 1;
949 return 0;
950 }
951
952 bool
953 StoreEntry::checkTooBig() const
954 {
955 if (mem_obj->endOffset() > store_maxobjsize)
956 return true;
957
958 if (getReply()->content_length < 0)
959 return false;
960
961 return (getReply()->content_length > store_maxobjsize);
962 }
963
964 // TODO: move "too many open..." checks outside -- we are called too early/late
965 bool
966 StoreEntry::checkCachable()
967 {
968 // XXX: This method is used for both memory and disk caches, but some
969 // checks are specific to disk caches. Move them to mayStartSwapOut().
970
971 // XXX: This method may be called several times, sometimes with different
972 // outcomes, making store_check_cachable_hist counters misleading.
973
974 // check this first to optimize handling of repeated calls for uncachables
975 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
976 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
977 ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
978 return 0; // avoid rerequesting release below
979 }
980
981 #if CACHE_ALL_METHODS
982
983 if (mem_obj->method != Http::METHOD_GET) {
984 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
985 ++store_check_cachable_hist.no.non_get;
986 } else
987 #endif
988 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
989 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
990 ++store_check_cachable_hist.no.wrong_content_length;
991 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
992 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
993 ++store_check_cachable_hist.no.negative_cached;
994 return 0; /* avoid release call below */
995 } else if (!mem_obj || !getReply()) {
996 // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
997 // this segfault protection, but how can we get such a HIT?
998 debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
999 ++store_check_cachable_hist.no.missing_parts;
1000 } else if (checkTooBig()) {
1001 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1002 ++store_check_cachable_hist.no.too_big;
1003 } else if (checkTooSmall()) {
1004 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1005 ++store_check_cachable_hist.no.too_small;
1006 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1007 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1008 ++store_check_cachable_hist.no.private_key;
1009 } else if (swap_status != SWAPOUT_NONE) {
1010 /*
1011 * we check swap_status here because the remaining
1012 * cases are only relevant if we haven't started swapping
1013 * out the object yet.
1014 */
1015 return 1;
1016 } else if (storeTooManyDiskFilesOpen()) {
1017 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1018 ++store_check_cachable_hist.no.too_many_open_files;
1019 } else if (fdNFree() < RESERVED_FD) {
1020 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1021 ++store_check_cachable_hist.no.too_many_open_fds;
1022 } else {
1023 ++store_check_cachable_hist.yes.Default;
1024 return 1;
1025 }
1026
1027 releaseRequest();
1028 return 0;
1029 }
1030
1031 void
1032 storeCheckCachableStats(StoreEntry *sentry)
1033 {
1034 storeAppendPrintf(sentry, "Category\t Count\n");
1035
1036 #if CACHE_ALL_METHODS
1037
1038 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1039 store_check_cachable_hist.no.non_get);
1040 #endif
1041
1042 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1043 store_check_cachable_hist.no.not_entry_cachable);
1044 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1045 store_check_cachable_hist.no.wrong_content_length);
1046 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1047 store_check_cachable_hist.no.negative_cached);
1048 storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
1049 store_check_cachable_hist.no.missing_parts);
1050 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1051 store_check_cachable_hist.no.too_big);
1052 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1053 store_check_cachable_hist.no.too_small);
1054 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1055 store_check_cachable_hist.no.private_key);
1056 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1057 store_check_cachable_hist.no.too_many_open_files);
1058 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1059 store_check_cachable_hist.no.too_many_open_fds);
1060 storeAppendPrintf(sentry, "yes.default\t%d\n",
1061 store_check_cachable_hist.yes.Default);
1062 }
1063
1064 void
1065 StoreEntry::lengthWentBad(const char *reason)
1066 {
1067 debugs(20, 3, "because " << reason << ": " << *this);
1068 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1069 releaseRequest();
1070 }
1071
1072 void
1073 StoreEntry::complete()
1074 {
1075 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1076
1077 if (store_status != STORE_PENDING) {
1078 /*
1079 * if we're not STORE_PENDING, then probably we got aborted
1080 * and there should be NO clients on this entry
1081 */
1082 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1083 assert(mem_obj->nclients == 0);
1084 return;
1085 }
1086
1087 /* This is suspect: mem obj offsets include the headers. Do we adjust for that
1088 * in use of object_sz?
1089 */
1090 mem_obj->object_sz = mem_obj->endOffset();
1091
1092 store_status = STORE_OK;
1093
1094 assert(mem_status == NOT_IN_MEMORY);
1095
1096 if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
1097 lengthWentBad("!validLength() in complete()");
1098
1099 #if USE_CACHE_DIGESTS
1100 if (mem_obj->request)
1101 mem_obj->request->hier.store_complete_stop = current_time;
1102
1103 #endif
1104 /*
1105 * We used to call invokeHandlers, then storeSwapOut. However,
1106 * Madhukar Reddy <myreddy@persistence.com> reported that
1107 * responses without content length would sometimes get released
1108 * in client_side, thinking that the response is incomplete.
1109 */
1110 invokeHandlers();
1111 }
1112
1113 /*
1114 * Someone wants to abort this transfer. Set the reason in the
1115 * request structure, call the callback and mark the
1116 * entry for releasing
1117 */
1118 void
1119 StoreEntry::abort()
1120 {
1121 ++statCounter.aborted_requests;
1122 assert(store_status == STORE_PENDING);
1123 assert(mem_obj != NULL);
1124 debugs(20, 6, "storeAbort: " << getMD5Text());
1125
1126 lock("StoreEntry::abort"); /* lock while aborting */
1127 negativeCache();
1128
1129 releaseRequest();
1130
1131 EBIT_SET(flags, ENTRY_ABORTED);
1132
1133 setMemStatus(NOT_IN_MEMORY);
1134
1135 store_status = STORE_OK;
1136
1137 /* Notify the server side */
1138
1139 /*
1140 * DPW 2007-05-07
1141 * Should we check abort.data for validity?
1142 */
1143 if (mem_obj->abort.callback) {
1144 if (!cbdataReferenceValid(mem_obj->abort.data))
1145 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
1146 eventAdd("mem_obj->abort.callback",
1147 mem_obj->abort.callback,
1148 mem_obj->abort.data,
1149 0.0,
1150 true);
1151 unregisterAbort();
1152 }
1153
1154 /* XXX Should we reverse these two, so that there is no
1155 * unneeded disk swapping triggered?
1156 */
1157 /* Notify the client side */
1158 invokeHandlers();
1159
1160 // abort swap out, invalidating what was created so far (release follows)
1161 swapOutFileClose(StoreIOState::writerGone);
1162
1163 unlock("StoreEntry::abort"); /* unlock */
1164 }
1165
1166 /**
1167 * Clear Memory storage to accommodate the given object len
1168 */
1169 void
1170 storeGetMemSpace(int size)
1171 {
1172 PROF_start(storeGetMemSpace);
1173 StoreEntry *e = NULL;
1174 int released = 0;
1175 static time_t last_check = 0;
1176 size_t pages_needed;
1177 RemovalPurgeWalker *walker;
1178
1179 if (squid_curtime == last_check) {
1180 PROF_stop(storeGetMemSpace);
1181 return;
1182 }
1183
1184 last_check = squid_curtime;
1185
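/* round the requested byte count up to whole SM_PAGE_SIZE pages */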
1186 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1187
1188 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1189 PROF_stop(storeGetMemSpace);
1190 return;
1191 }
1192
1193 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1194 " pages");
1195
1196 /* XXX what to set as max_scan here? */
1197 walker = mem_policy->PurgeInit(mem_policy, 100000);
1198
1199 while ((e = walker->Next(walker))) {
1200 e->purgeMem();
1201 ++released;
1202
1203 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1204 break;
1205 }
1206
1207 walker->Done(walker);
1208 debugs(20, 3, "storeGetMemSpace stats:");
1209 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1210 debugs(20, 3, " " << std::setw(6) << released << " were released");
1211 PROF_stop(storeGetMemSpace);
1212 }
1213
1214 /* thunk through to Store::Root().maintain(). Note that this would be better still
1215 * if registered against the root store itself, but that requires more complex
1216 * update logic - bigger fish to fry first. Long term, each store will
1217 * self-register when it becomes active.
1218 */
1219 void
1220 Store::Maintain(void *)
1221 {
1222 Store::Root().maintain();
1223
1224 /* Reregister a maintain event .. */
1225 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1226
1227 }
1228
1229 /* The maximum number of objects to scan when maintaining storage space */
1230 #define MAINTAIN_MAX_SCAN 1024
1231 #define MAINTAIN_MAX_REMOVE 64
1232
1233 /* release an object from a cache */
1234 void
1235 StoreEntry::release()
1236 {
1237 PROF_start(storeRelease);
1238 debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
1239 /* If, for any reason, we can't discard this object because of an
1240 * outstanding request, mark it for pending release */
1241
1242 if (locked()) {
1243 expireNow();
1244 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1245 releaseRequest();
1246 PROF_stop(storeRelease);
1247 return;
1248 }
1249
1250 if (Store::Controller::store_dirs_rebuilding && swap_filen > -1) {
1251 /* TODO: Teach disk stores to handle releases during rebuild instead. */
1252
1253 Store::Root().memoryUnlink(*this);
1254
1255 setPrivateKey();
1256
1257 // lock the entry until rebuilding is done
1258 lock("storeLateRelease");
1259 setReleaseFlag();
1260 LateReleaseStack.push(this);
1261 return;
1262 }
1263
1264 storeLog(STORE_LOG_RELEASE, this);
1265 if (swap_filen > -1 && !EBIT_TEST(flags, KEY_PRIVATE)) {
1266 // log before unlink() below clears swap_filen
1267 storeDirSwapLog(this, SWAP_LOG_DEL);
1268 }
1269
1270 Store::Root().unlink(*this);
1271 destroyStoreEntry(static_cast<hash_link *>(this));
1272 PROF_stop(storeRelease);
1273 }
1274
1275 static void
1276 storeLateRelease(void *)
1277 {
1278 StoreEntry *e;
1279 static int n = 0;
1280
1281 if (Store::Controller::store_dirs_rebuilding) {
1282 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1283 return;
1284 }
1285
1286 // TODO: this works but looks inelegant.
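// release up to 10 entries per event tick, rescheduling ourselves until
// the stack drains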
1287 for (int i = 0; i < 10; ++i) {
1288 if (LateReleaseStack.empty()) {
1289 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1290 return;
1291 } else {
1292 e = LateReleaseStack.top();
1293 LateReleaseStack.pop();
1294 }
1295
1296 e->unlock("storeLateRelease");
1297 ++n;
1298 }
1299
1300 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1301 }
1302
1303 /* return 1 if a store entry is locked */
1304 int
1305 StoreEntry::locked() const
1306 {
1307 if (lock_count)
1308 return 1;
1309
1310 /*
1311 * SPECIAL, PUBLIC entries should be "locked";
1312 * XXX: Their owner should lock them instead of relying on this hack.
1313 */
1314 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1315 if (!EBIT_TEST(flags, KEY_PRIVATE))
1316 return 1;
1317
1318 return 0;
1319 }
1320
1321 bool
1322 StoreEntry::validLength() const
1323 {
1324 int64_t diff;
1325 const HttpReply *reply;
1326 assert(mem_obj != NULL);
1327 reply = getReply();
1328 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1329 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1330 objectLen());
1331 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1332 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1333
1334 if (reply->content_length < 0) {
1335 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1336 return 1;
1337 }
1338
1339 if (reply->hdr_sz == 0) {
1340 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1341 return 1;
1342 }
1343
1344 if (mem_obj->method == Http::METHOD_HEAD) {
1345 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1346 return 1;
1347 }
1348
1349 if (reply->sline.status() == Http::scNotModified)
1350 return 1;
1351
1352 if (reply->sline.status() == Http::scNoContent)
1353 return 1;
1354
1355 diff = reply->hdr_sz + reply->content_length - objectLen();
1356
1357 if (diff == 0)
1358 return 1;
1359
1360 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1361
1362 return 0;
1363 }
1364
1365 static void
1366 storeRegisterWithCacheManager(void)
1367 {
1368 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1369 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1370 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1371 storeCheckCachableStats, 0, 1);
1372 }
1373
1374 void
1375 storeInit(void)
1376 {
1377 storeKeyInit();
1378 mem_policy = createRemovalPolicy(Config.memPolicy);
1379 storeDigestInit();
1380 storeLogOpen();
1381 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1382 Store::Root().init();
1383 storeRebuildStart();
1384
1385 storeRegisterWithCacheManager();
1386 }
1387
1388 void
1389 storeConfigure(void)
1390 {
1391 Store::Root().updateLimits();
1392 }
1393
1394 bool
1395 StoreEntry::memoryCachable()
1396 {
1397 if (!checkCachable())
1398 return 0;
1399
1400 if (mem_obj == NULL)
1401 return 0;
1402
1403 if (mem_obj->data_hdr.size() == 0)
1404 return 0;
1405
1406 if (mem_obj->inmem_lo != 0)
1407 return 0;
1408
1409 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1410 return 0;
1411
1412 return 1;
1413 }
1414
1415 int
1416 StoreEntry::checkNegativeHit() const
1417 {
1418 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1419 return 0;
1420
1421 if (expires <= squid_curtime)
1422 return 0;
1423
1424 if (store_status != STORE_OK)
1425 return 0;
1426
1427 return 1;
1428 }
1429
1430 /**
1431 * Set object for negative caching.
1432 * Preserves any expiry information given by the server.
1433 * In the absence of proper expiry info it will be set to expire immediately;
1434 * with HTTP violations enabled, the configured negative TTL is observed instead.
1435 */
1436 void
1437 StoreEntry::negativeCache()
1438 {
1439 // XXX: should make the default for expires 0 instead of -1
1440 // so we can distinguish "Expires: -1" from nothing.
1441 if (expires <= 0)
1442 #if USE_HTTP_VIOLATIONS
1443 expires = squid_curtime + Config.negativeTtl;
1444 #else
1445 expires = squid_curtime;
1446 #endif
1447 EBIT_SET(flags, ENTRY_NEGCACHED);
1448 }
1449
1450 void
1451 storeFreeMemory(void)
1452 {
1453 Store::FreeMemory();
1454 #if USE_CACHE_DIGESTS
1455 delete store_digest;
1456 #endif
1457 store_digest = NULL;
1458 }
1459
1460 int
1461 expiresMoreThan(time_t expires, time_t when)
1462 {
1463 if (expires < 0) /* No Expires given */
1464 return 1;
1465
1466 return (expires > (squid_curtime + when));
1467 }
1468
1469 int
1470 StoreEntry::validToSend() const
1471 {
1472 if (EBIT_TEST(flags, RELEASE_REQUEST))
1473 return 0;
1474
1475 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1476 if (expires <= squid_curtime)
1477 return 0;
1478
1479 if (EBIT_TEST(flags, ENTRY_ABORTED))
1480 return 0;
1481
1482 // now check that the entry has a cache backing or is collapsed
1483 if (swap_filen > -1) // backed by a disk cache
1484 return 1;
1485
1486 if (swappingOut()) // will be backed by a disk cache
1487 return 1;
1488
1489 if (!mem_obj) // not backed by a memory cache and not collapsed
1490 return 0;
1491
1492 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
1493 // disk cache backing that store_client constructor will assert. XXX: This
1494 // is wrong for range requests (that could feed off nibbled memory) and for
1495 // entries backed by the shared memory cache (that could, in theory, get
1496 // nibbled bytes from that cache, but there is no such "memoryIn" code).
1497 if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
1498 return 0;
1499
1500 // The following check is correct but useless at this position. TODO: Move
1501 // it up when the shared memory cache can either replenish locally nibbled
1502 // bytes or, better, does not use local RAM copy at all.
1503 // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
1504 // return 1;
1505
1506 return 1;
1507 }
1508
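/// Derives our effective Date (timestamp), Expires, and Last-Modified values
/// from the reply, clamping bogus origin clocks and compensating with the
/// Age header and the measured request delay. Returns false if none changed.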
1509 bool
1510 StoreEntry::timestampsSet()
1511 {
1512 const HttpReply *reply = getReply();
1513 time_t served_date = reply->date;
1514 int age = reply->header.getInt(Http::HdrType::AGE);
1515 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1516 /* make sure that 0 <= served_date <= squid_curtime */
1517
1518 if (served_date < 0 || served_date > squid_curtime)
1519 served_date = squid_curtime;
1520
1521 /* Bug 1791:
1522 * If the returned Date: is more than 24 hours older than
1523 * the squid_curtime, then one of us needs to use NTP to set our
1524 * clock. We'll pretend that our clock is right.
1525 */
1526 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1527 served_date = squid_curtime;
1528
1529 /*
1530 * Compensate with Age header if origin server clock is ahead
1531 * of us and there is a cache in between us and the origin
1532 * server. But DONT compensate if the age value is larger than
1533 * squid_curtime because it results in a negative served_date.
1534 */
1535 if (age > squid_curtime - served_date)
1536 if (squid_curtime > age)
1537 served_date = squid_curtime - age;
1538
1539 // compensate for Squid-to-server and server-to-Squid delays
1540 if (mem_obj && mem_obj->request) {
1541 const time_t request_sent =
1542 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1543 if (0 < request_sent && request_sent < squid_curtime)
1544 served_date -= (squid_curtime - request_sent);
1545 }
1546
1547 time_t exp = 0;
1548 if (reply->expires > 0 && reply->date > -1)
1549 exp = served_date + (reply->expires - reply->date);
1550 else
1551 exp = reply->expires;
1552
1553 if (timestamp == served_date && expires == exp) {
1554 // if the reply lacks LMT, then we now know that our effective
1555 // LMT (i.e., timestamp) will stay the same, otherwise, old and
1556 // new modification times must match
1557 if (reply->last_modified < 0 || reply->last_modified == lastModified())
1558 return false; // nothing has changed
1559 }
1560
1561 expires = exp;
1562
1563 lastModified_ = reply->last_modified;
1564
1565 timestamp = served_date;
1566
1567 return true;
1568 }
1569
1570 void
1571 StoreEntry::registerAbort(STABH * cb, void *data)
1572 {
1573 assert(mem_obj);
1574 assert(mem_obj->abort.callback == NULL);
1575 mem_obj->abort.callback = cb;
1576 mem_obj->abort.data = cbdataReference(data);
1577 }
1578
1579 void
1580 StoreEntry::unregisterAbort()
1581 {
1582 assert(mem_obj);
1583 if (mem_obj->abort.callback) {
1584 mem_obj->abort.callback = NULL;
1585 cbdataReferenceDone(mem_obj->abort.data);
1586 }
1587 }
1588
1589 void
1590 StoreEntry::dump(int l) const
1591 {
1592 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1593 debugs(20, l, "StoreEntry->next: " << next);
1594 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1595 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1596 debugs(20, l, "StoreEntry->lastref: " << lastref);
1597 debugs(20, l, "StoreEntry->expires: " << expires);
1598 debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
1599 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1600 debugs(20, l, "StoreEntry->refcount: " << refcount);
1601 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1602 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1603 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1604 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1605 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1606 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1607 debugs(20, l, "StoreEntry->store_status: " << store_status);
1608 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1609 }
1610
1611 /*
1612 * NOTE, this function assumes only two mem states
1613 */
1614 void
1615 StoreEntry::setMemStatus(mem_status_t new_status)
1616 {
1617 if (new_status == mem_status)
1618 return;
1619
1620 // are we using a shared memory cache?
1621 if (Config.memShared && IamWorkerProcess()) {
1622 // This method was designed to update replacement policy, not to
1623 // actually purge something from the memory cache (TODO: rename?).
1624 // Shared memory cache does not have a policy that needs updates.
1625 mem_status = new_status;
1626 return;
1627 }
1628
1629 assert(mem_obj != NULL);
1630
1631 if (new_status == IN_MEMORY) {
1632 assert(mem_obj->inmem_lo == 0);
1633
1634 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1635 debugs(20, 4, "not inserting special " << *this << " into policy");
1636 } else {
1637 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1638 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
1639 }
1640
1641 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1642 } else {
1643 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1644 debugs(20, 4, "not removing special " << *this << " from policy");
1645 } else {
1646 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1647 debugs(20, 4, "removed " << *this);
1648 }
1649
1650 --hot_obj_count;
1651 }
1652
1653 mem_status = new_status;
1654 }
1655
1656 const char *
1657 StoreEntry::url() const
1658 {
1659 if (mem_obj == NULL)
1660 return "[null_mem_obj]";
1661 else
1662 return mem_obj->storeId();
1663 }
1664
1665 MemObject *
1666 StoreEntry::makeMemObject()
1667 {
1668 if (!mem_obj)
1669 mem_obj = new MemObject();
1670 return mem_obj;
1671 }
1672
1673 void
1674 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
1675 {
1676 makeMemObject();
1677 mem_obj->setUris(aUrl, aLogUrl, aMethod);
1678 }
1679
1680 /** disable sending content to the clients.
1681 *
1682 * This just sets DELAY_SENDING.
1683 */
1684 void
1685 StoreEntry::buffer()
1686 {
1687 EBIT_SET(flags, DELAY_SENDING);
1688 }
1689
1690 /** flush any buffered content.
1691 *
1692 * This just clears DELAY_SENDING and invokes the handlers
1693 * to begin sending anything that may be buffered.
1694 */
1695 void
1696 StoreEntry::flush()
1697 {
1698 if (EBIT_TEST(flags, DELAY_SENDING)) {
1699 EBIT_CLR(flags, DELAY_SENDING);
1700 invokeHandlers();
1701 }
1702 }
1703
1704 int64_t
1705 StoreEntry::objectLen() const
1706 {
1707 assert(mem_obj != NULL);
1708 return mem_obj->object_sz;
1709 }
1710
1711 int64_t
1712 StoreEntry::contentLen() const
1713 {
1714 assert(mem_obj != NULL);
1715 assert(getReply() != NULL);
1716 return objectLen() - getReply()->hdr_sz;
1717 }
1718
1719 HttpReply const *
1720 StoreEntry::getReply() const
1721 {
1722 return (mem_obj ? mem_obj->getReply().getRaw() : nullptr);
1723 }
1724
1725 void
1726 StoreEntry::reset()
1727 {
1728 assert (mem_obj);
1729 debugs(20, 3, url());
1730 mem_obj->reset();
1731 expires = lastModified_ = timestamp = -1;
1732 }
1733
1734 /*
1735 * storeFsInit
1736 *
1737 * This routine calls the SETUP routine for each fs type.
1738 * I don't know where the best place for this is, and I'm not going to shuffle
1739 * around large chunks of code right now (that can be done once it's working.)
1740 */
1741 void
1742 storeFsInit(void)
1743 {
1744 storeReplSetup();
1745 }
1746
1747 /*
1748 * called to add another store removal policy module
1749 */
1750 void
1751 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1752 {
1753 int i;
1754
1755 /* find the number of currently known repl types */
1756 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1757 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1758 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1759 return;
1760 }
1761 }
1762
1763 /* add the new type */
1764 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1765
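/* zero the new final slot so the array stays terminated */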
1766 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1767
1768 storerepl_list[i].typestr = type;
1769
1770 storerepl_list[i].create = create;
1771 }
1772
1773 /*
1774 * Create a removal policy instance
1775 */
1776 RemovalPolicy *
1777 createRemovalPolicy(RemovalPolicySettings * settings)
1778 {
1779 storerepl_entry_t *r;
1780
1781 for (r = storerepl_list; r && r->typestr; ++r) {
1782 if (strcmp(r->typestr, settings->type) == 0)
1783 return r->create(settings->args);
1784 }
1785
1786 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1787 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1788 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1789 fatalf("ERROR: Unknown policy %s\n", settings->type);
1790 return NULL; /* NOTREACHED */
1791 }
1792
1793 #if 0
1794 void
1795 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1796 {
1797 if (e->swap_file_number == filn)
1798 return;
1799
1800 if (filn < 0) {
1801 assert(-1 == filn);
1802 storeDirMapBitReset(e->swap_file_number);
1803 storeDirLRUDelete(e);
1804 e->swap_file_number = -1;
1805 } else {
1806 assert(-1 == e->swap_file_number);
1807 storeDirMapBitSet(e->swap_file_number = filn);
1808 storeDirLRUAdd(e);
1809 }
1810 }
1811
1812 #endif
1813
1814 void
1815 StoreEntry::storeErrorResponse(HttpReply *reply)
1816 {
1817 lock("StoreEntry::storeErrorResponse");
1818 buffer();
1819 replaceHttpReply(reply);
1820 flush();
1821 complete();
1822 negativeCache();
1823 releaseRequest();
1824 unlock("StoreEntry::storeErrorResponse");
1825 }
1826
1827 /*
1828 * Replace a store entry with
1829 * a new reply. This eats the reply.
1830 */
1831 void
1832 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1833 {
1834 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1835
1836 if (!mem_obj) {
1837 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1838 return;
1839 }
1840
1841 mem_obj->replaceReply(HttpReplyPointer(rep));
1842
1843 if (andStartWriting)
1844 startWriting();
1845 }
1846
1847 void
1848 StoreEntry::startWriting()
1849 {
1850 /* TODO: when we store headers separately remove the header portion */
1851 /* TODO: mark the length of the headers ? */
1852 /* We ONLY want the headers */
1853
1854 assert (isEmpty());
1855 assert(mem_obj);
1856
1857 const HttpReply *rep = getReply();
1858 assert(rep);
1859
1860 buffer();
1861 rep->packHeadersInto(this);
1862 mem_obj->markEndOfReplyHeaders();
1863 EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
1864
1865 rep->body.packInto(this);
1866 flush();
1867 }
1868
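/// Packs this entry's swap metadata TLVs into a heap-allocated buffer that
/// the caller must free; records the header size in mem_obj->swap_hdr_sz.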
1869 char const *
1870 StoreEntry::getSerialisedMetaData()
1871 {
1872 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1873 int swap_hdr_sz;
1874 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1875 storeSwapTLVFree(tlv_list);
1876 assert (swap_hdr_sz >= 0);
1877 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1878 return result;
1879 }
1880
1881 /**
1882 * Abandon the transient entry our worker has created if neither the shared
1883 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1884 * any, should notice and use Plan B instead of getting stuck waiting for us
1885 * to start swapping the entry out.
1886 */
1887 void
1888 StoreEntry::transientsAbandonmentCheck()
1889 {
1890 if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
1891 mem_obj->xitTable.index >= 0 && // other workers may be interested
1892 mem_obj->memCache.index < 0 && // rejected by the shared memory cache
1893 mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
1894 debugs(20, 7, "cannot be shared: " << *this);
1895 if (!shutting_down) // Store::Root() is FATALly missing during shutdown
1896 Store::Root().transientsAbandon(*this);
1897 }
1898 }
1899
1900 void
1901 StoreEntry::memOutDecision(const bool)
1902 {
1903 transientsAbandonmentCheck();
1904 }
1905
1906 void
1907 StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
1908 {
1909 // Abandon our transient entry if neither shared memory nor disk wants it.
1910 assert(mem_obj);
1911 mem_obj->swapout.decision = decision;
1912 transientsAbandonmentCheck();
1913 }
1914
1915 void
1916 StoreEntry::trimMemory(const bool preserveSwappable)
1917 {
1918 /*
1919 * DPW 2007-05-09
1920 * Bug #1943. We must not let go of any data for IN_MEMORY
1921 * objects. We have to wait until the mem_status changes.
1922 */
1923 if (mem_status == IN_MEMORY)
1924 return;
1925
1926 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1927 return; // cannot trim because we do not load them again
1928
1929 if (preserveSwappable)
1930 mem_obj->trimSwappable();
1931 else
1932 mem_obj->trimUnSwappable();
1933
1934 debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
1935 }
1936
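/// whether the entry is modified per the If-Modified-Since details; imslen
/// carries the obsolete extension, e.g.
/// "If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=34" (-1 if absent)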
1937 bool
1938 StoreEntry::modifiedSince(const time_t ims, const int imslen) const
1939 {
1940 int object_length;
1941 const time_t mod_time = lastModified();
1942
1943 debugs(88, 3, "modifiedSince: '" << url() << "'");
1944
1945 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1946
1947 if (mod_time < 0)
1948 return true;
1949
1950 /* Find size of the object */
1951 object_length = getReply()->content_length;
1952
1953 if (object_length < 0)
1954 object_length = contentLen();
1955
1956 if (mod_time > ims) {
1957 debugs(88, 3, "--> YES: entry newer than client");
1958 return true;
1959 } else if (mod_time < ims) {
1960 debugs(88, 3, "--> NO: entry older than client");
1961 return false;
1962 } else if (imslen < 0) {
1963 debugs(88, 3, "--> NO: same LMT, no client length");
1964 return false;
1965 } else if (imslen == object_length) {
1966 debugs(88, 3, "--> NO: same LMT, same length");
1967 return false;
1968 } else {
1969 debugs(88, 3, "--> YES: same LMT, different length");
1970 return true;
1971 }
1972 }
1973
1974 bool
1975 StoreEntry::hasEtag(ETag &etag) const
1976 {
1977 if (const HttpReply *reply = getReply()) {
1978 etag = reply->header.getETag(Http::HdrType::ETAG);
1979 if (etag.str)
1980 return true;
1981 }
1982 return false;
1983 }
1984
1985 bool
1986 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1987 {
1988 const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
1989 return hasOneOfEtags(reqETags, false);
1990 }
1991
1992 bool
1993 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1994 {
1995 const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
1996 // weak comparison is allowed only for HEAD or full-body GET requests
1997 const bool allowWeakMatch = !request.flags.isRanged &&
1998 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
1999 return hasOneOfEtags(reqETags, allowWeakMatch);
2000 }
2001
2002 /// whether at least one of the request ETags matches entity ETag
2003 bool
2004 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
2005 {
2006 const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
2007 if (!repETag.str)
2008 return strListIsMember(&reqETags, "*", ',');
2009
2010 bool matched = false;
2011 const char *pos = NULL;
2012 const char *item;
2013 int ilen;
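// RFC 7232: If-Match needs strong comparison; If-None-Match may use weak
// comparison for plain GET/HEAD (allowWeakMatch, set by our callers)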
2014 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
2015 if (!strncmp(item, "*", ilen))
2016 matched = true;
2017 else {
2018 String str;
2019 str.append(item, ilen);
2020 ETag reqETag;
2021 if (etagParseInit(&reqETag, str.termedBuf())) {
2022 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
2023 etagIsStrongEqual(repETag, reqETag);
2024 }
2025 }
2026 }
2027 return matched;
2028 }
2029
2030 Store::Disk &
2031 StoreEntry::disk() const
2032 {
2033 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2034 const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
2035 assert(sd);
2036 return *sd;
2037 }
2038
2039 /*
2040 * return true if the entry is in a state where
2041 * it can accept more data (i.e., via the write() method)
2042 */
2043 bool
2044 StoreEntry::isAccepting() const
2045 {
2046 if (STORE_PENDING != store_status)
2047 return false;
2048
2049 if (EBIT_TEST(flags, ENTRY_ABORTED))
2050 return false;
2051
2052 return true;
2053 }
2054
2055 const char *
2056 StoreEntry::describeTimestamps() const
2057 {
2058 LOCAL_ARRAY(char, buf, 256);
2059 snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
2060 static_cast<int>(timestamp),
2061 static_cast<int>(lastref),
2062 static_cast<int>(lastModified_),
2063 static_cast<int>(expires));
2064 return buf;
2065 }
2066
2067 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2068 {
2069 os << "e:";
2070
2071 if (e.mem_obj) {
2072 if (e.mem_obj->xitTable.index > -1)
2073 os << 't' << e.mem_obj->xitTable.index;
2074 if (e.mem_obj->memCache.index > -1)
2075 os << 'm' << e.mem_obj->memCache.index;
2076 }
2077 if (e.swap_filen > -1 || e.swap_dirn > -1)
2078 os << 'd' << e.swap_filen << '@' << e.swap_dirn;
2079
2080 os << '=';
2081
2082 // print only non-default status values, using unique letters
2083 if (e.mem_status != NOT_IN_MEMORY ||
2084 e.store_status != STORE_PENDING ||
2085 e.swap_status != SWAPOUT_NONE ||
2086 e.ping_status != PING_NONE) {
2087 if (e.mem_status != NOT_IN_MEMORY) os << 'm';
2088 if (e.store_status != STORE_PENDING) os << 's';
2089 if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
2090 if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
2091 }
2092
2093 // print only set flags, using unique letters
2094 if (e.flags) {
2095 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
2096 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
2097 if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
2098 if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
2099 if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
2100 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
2101 if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
2102 if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
2103 if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
2104 if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
2105 if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
2106 if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
2107 if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
2108 }
2109
2110 if (e.mem_obj && e.mem_obj->smpCollapsed)
2111 os << 'O';
2112
2113 return os << '/' << &e << '*' << e.locks();
2114 }
2115
2116 /* NullStoreEntry */
2117
2118 NullStoreEntry NullStoreEntry::_instance;
2119
2120 NullStoreEntry *
2121 NullStoreEntry::getInstance()
2122 {
2123 return &_instance;
2124 }
2125
2126 char const *
2127 NullStoreEntry::getMD5Text() const
2128 {
2129 return "N/A";
2130 }
2131
2132 void
2133 NullStoreEntry::operator delete(void*)
2134 {
2135 fatal ("Attempt to delete NullStoreEntry\n");
2136 }
2137
2138 char const *
2139 NullStoreEntry::getSerialisedMetaData()
2140 {
2141 return NULL;
2142 }
2143