/*
 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "comm/Connection.h"
#include "comm/Read.h"
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store/Controller.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
 * XXX: convert to MEMPROXY_CLASS() API
 */
#include "mem/Pool.h"

#include <climits>
#include <stack>

#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

/** \todo Convert these string constants to enum string-arrays generated */

const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
/*
 * This defines a repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

void
StoreEntry::makePublic()
{
    /* This object can be cached for a long time */

    if (!EBIT_TEST(flags, RELEASE_REQUEST))
        setPublicKey();
}

void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest(); /* delete object when not used */
}

void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();
    makePublic();
}

size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}

#include "comm.h"

void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}

void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}

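/* Illustrative sketch (hypothetical caller, not part of this file): a
 * server-side reader funnels its comm reads through the entry so that
 * read-ahead and delay-pool limits are applied transparently:
 *
 *   AsyncCall::Pointer call = commCbCall(20, 5, "MyReader::readDone",
 *                                        CommIoCbPtrFun(&MyReader::ReadDone, this));
 *   entry->delayAwareRead(serverConnection, readBuf, readBufSize, call);
 *
 * When the entry cannot accept more bytes now, the read is parked and later
 * resumed via StoreEntry::DeferReader; otherwise comm_read() is scheduled
 * directly with the allowed amount.
 */
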
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

void
StoreEntry::setNoDelay(bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}

// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}

StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}

#if USE_ADAPTATION
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif

void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete mem;
    }
}

void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == NULL);

    delete e;
}

/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */

void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}

/* -------------------------------------------------------------------------- */

/* get rid of memory copy of the object */
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    if (swap_status != SWAPOUT_DONE)
        release();
}

void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

void
StoreEntry::touch()
{
    lastref = squid_curtime;
}

void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}

void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}

int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}

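/* Locking sketch (illustrative): lock() and unlock() take a context string
 * for debugging and must be balanced, e.g.:
 *
 *   e->lock("myModule::useEntry");    // hypothetical context name
 *   ... use e ...
 *   e->unlock("myModule::useEntry");  // may release or idle the entry
 *
 * When the count reaches zero, unlock() either releases the entry (if
 * RELEASE_REQUEST is set) or hands it to Store::Root().handleIdleEntry().
 */
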
void
StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequestMethod( request, method);

    if (!result)
        aClient->created (NullStoreEntry::getInstance());
    else
        aClient->created (result);
}

void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}

StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
}

StoreEntry *
storeGetPublicByRequest(HttpRequest * req)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);

    return e;
}

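/* Lookup sketch (illustrative): callers probe the index by public key before
 * deciding whether a request is a hit candidate:
 *
 *   if (StoreEntry *hit = storeGetPublic(uri, Http::METHOD_GET)) {
 *       // possible hit; still subject to validToSend() and freshness checks
 *   }
 *
 * As the TODO in setPublicKey() notes, entries returned this way are not
 * locked by the lookup itself.
 */
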
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}

/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over it.
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris()) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}

void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers); /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}

StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->makeMemObject();
    e->mem_obj->setUris(url, log_url, method);

    if (flags.cachable) {
        EBIT_CLR(e->flags, RELEASE_REQUEST);
    } else {
        e->releaseRequest();
    }

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}

StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
    e->lock("storeCreateEntry");

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    return e;
}

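/* Creation sketch (illustrative, hypothetical miss path): a caller builds a
 * pending entry and streams the reply into it:
 *
 *   StoreEntry *e = storeCreateEntry(uri, logUri, request->flags, request->method);
 *   e->replaceHttpReply(reply);   // stores the reply and starts writing headers
 *   e->append(body, bodyLen);     // repeated as payload arrives
 *   e->complete();
 *   e->unlock("miss path");       // storeCreateEntry() returned it locked
 */
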
/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}

/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}

void
StoreEntry::vappendf(const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    *buf = 0;
    int x;

#ifdef VA_COPY
    va_list ap;
    /* Fix of bug 753r. The value of vargs is undefined
     * after vsnprintf() returns. Make a copy of vargs
     * in case we loop around and call vsnprintf() again.
     */
    VA_COPY(ap,vargs);
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
    va_end(ap);
#else /* VA_COPY */
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
#endif /* VA_COPY */

    if (x < static_cast<int>(sizeof(buf))) {
        append(buf, x);
        return;
    }

    // okay, do it the slow way.
    char *buf2 = new char[x+1];
    int y = vsnprintf(buf2, x+1, fmt, vargs);
    assert(y >= 0 && y == x);
    append(buf2, y);
    delete[] buf2;
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}

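/* Usage sketch (illustrative): cache manager actions format their report
 * into the entry they are handed, as storeCheckCachableStats() below does:
 *
 *   static void myStats(StoreEntry *sentry)   // hypothetical OBJH
 *   {
 *       storeAppendPrintf(sentry, "entries in use\t%d\n", (int)StoreEntry::inUseCount());
 *   }
 */
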
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;

int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}

int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}

bool
StoreEntry::checkTooBig() const
{
    if (mem_obj->endOffset() > store_maxobjsize)
        return true;

    if (getReply()->content_length < 0)
        return false;

    return (getReply()->content_length > store_maxobjsize);
}

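/* Numeric example: with store_maxobjsize = 4 MB (4194304 bytes), a reply
 * announcing Content-Length: 6291456 is rejected immediately, while a reply
 * without a length (content_length < 0) is rejected only once endOffset()
 * actually grows past 4 MB.
 */
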
// TODO: move "too many open..." checks outside -- we are called too early/late
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}

void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}

void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}

void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}

/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT, HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}

/**
 * Clear Memory storage to accommodate the given object len
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released << " were released");
    PROF_stop(storeGetMemSpace);
}

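/* Arithmetic sketch: with SM_PAGE_SIZE = 4096, a 10000-byte write needs
 * pages_needed = (10000 + 4095) / 4096 = 3 pages, and the purge walker only
 * runs when mem_node::InUseCount() + 3 would reach store_pages_max.
 */
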
/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}

/* The maximum objects to scan for maintain storage space */
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64

/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && swap_filen > -1) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        Store::Root().memoryUnlink(*this);

        setPrivateKey();

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        setReleaseFlag();
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    if (swap_filen > -1 && !EBIT_TEST(flags, KEY_PRIVATE)) {
        // log before unlink() below clears swap_filen
        storeDirSwapLog(this, SWAP_LOG_DEL);
    }

    Store::Root().unlink(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}

static void
storeLateRelease(void *)
{
    StoreEntry *e;
    static int n = 0;

    if (Store::Controller::store_dirs_rebuilding) {
        eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
        return;
    }

    // TODO: this works but looks inelegant.
    for (int i = 0; i < 10; ++i) {
        if (LateReleaseStack.empty()) {
            debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
            return;
        } else {
            e = LateReleaseStack.top();
            LateReleaseStack.pop();
        }

        e->unlock("storeLateRelease");
        ++n;
    }

    eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
}

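/* Scheduling sketch: while the disk stores are rebuilding, the event re-arms
 * itself once per second and does nothing; afterwards each (zero-delay) tick
 * unlocks at most 10 queued entries, so a backlog of N late releases drains
 * in roughly N/10 event-loop iterations without blocking the main loop.
 */
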
/* return 1 if a store entry is locked */
int
StoreEntry::locked() const
{
    if (lock_count)
        return 1;

    /*
     * SPECIAL, PUBLIC entries should be "locked";
     * XXX: Their owner should lock them then instead of relying on this hack.
     */
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            return 1;

    return 0;
}

bool
StoreEntry::validLength() const
{
    int64_t diff;
    const HttpReply *reply;
    assert(mem_obj != NULL);
    reply = getReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength: object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") << "; '" << getMD5Text() << "'" );

    return 0;
}

static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}

void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}

/// computes maximum size of a cachable object
/// larger objects are rejected by all (disk and memory) cache stores
static int64_t
storeCalcMaxObjSize()
{
    int64_t ms = 0; // nothing can be cached without at least one store consent

    // global maximum is at least the disk store maximum
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        assert (Config.cacheSwap.swapDirs[i].getRaw());
        const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
        if (ms < storeMax)
            ms = storeMax;
    }

    // global maximum is at least the memory store maximum
    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    if (ms < memMax)
        ms = memMax;

    return ms;
}

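/* Example: with two cache_dirs whose max-size limits are 512 KB and 32 MB,
 * and maximum_object_size_in_memory of 512 KB, store_maxobjsize becomes
 * 32 MB. This is only the global upper bound; each individual store still
 * applies its own, possibly lower, limit.
 */
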
void
storeConfigure(void)
{
    store_swap_high = (long) (((float) Store::Root().maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) Store::Root().maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    store_maxobjsize = storeCalcMaxObjSize();
}

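/* Example: with cache_swap_high 95 and cache_swap_low 90, a 100 GB
 * Store::Root().maxSize() yields store_swap_high = 95 GB and
 * store_swap_low = 90 GB; store_pages_max is cache_mem divided by
 * sizeof(mem_node), which is a little over one SM_PAGE_SIZE data page.
 */
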
bool
StoreEntry::memoryCachable()
{
    if (!checkCachable())
        return 0;

    if (mem_obj == NULL)
        return 0;

    if (mem_obj->data_hdr.size() == 0)
        return 0;

    if (mem_obj->inmem_lo != 0)
        return 0;

    if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
        return 0;

    return 1;
}

int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}

/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}

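/* Example: with USE_HTTP_VIOLATIONS and negative_ttl set to 5 minutes, a 404
 * whose reply carried no usable Expires (expires <= 0) becomes a negative
 * hit until squid_curtime + 300, while a server-supplied future Expires is
 * left untouched.
 */
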
void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}

int
expiresMoreThan(time_t expires, time_t when)
{
    if (expires < 0) /* No Expires given */
        return 1;

    return (expires > (squid_curtime + when));
}

int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}

void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}

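/* Worked example: squid_curtime = 1000, reply Date: 1020 (origin clock runs
 * ahead) and Age: 30. served_date is first clamped to 1000; the Age branch
 * then sets served_date = 1000 - 30 = 970. With Expires = Date + 60, the
 * entry gets expires = 970 + 60 = 1030 and timestamp = 970.
 */
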
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}

void
StoreEntry::unregisterAbort()
{
    assert(mem_obj);
    if (mem_obj->abort.callback) {
        mem_obj->abort.callback = NULL;
        cbdataReferenceDone(mem_obj->abort.data);
    }
}

void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}

/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}

const char *
StoreEntry::url() const
{
    if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}

MemObject *
StoreEntry::makeMemObject()
{
    if (!mem_obj)
        mem_obj = new MemObject();
    return mem_obj;
}

void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    makeMemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}

/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}

/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and invokes the handlers
 * to begin sending anything that may be buffered.
 */
void
StoreEntry::flush()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        EBIT_CLR(flags, DELAY_SENDING);
        invokeHandlers();
    }
}

int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}

int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}

HttpReply const *
StoreEntry::getReply () const
{
    if (NULL == mem_obj)
        return NULL;

    return mem_obj->getReply();
}

void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, "StoreEntry::reset: " << url());
    mem_obj->reset();
    HttpReply *rep = (HttpReply *) getReply(); // bypass const
    rep->reset();
    expires = lastmod = timestamp = -1;
}

/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once it's working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}

/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}

/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR:   and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return NULL; /* NOTREACHED */
}

#if 0
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif

void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest();
    unlock("StoreEntry::storeErrorResponse");
}

/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep);

    if (andStartWriting)
        startWriting();
}

void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    buffer();
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
    flush();
}

char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}

/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
            mem_obj->xitTable.index >= 0 && // other workers may be interested
            mem_obj->memCache.index < 0 && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}

void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}

void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}

void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}

bool
StoreEntry::modifiedSince(HttpRequest * request) const
{
    int object_length;
    time_t mod_time = lastmod;

    if (mod_time < 0)
        mod_time = timestamp;

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > request->ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < request->ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (request->imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (request->imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}

bool
StoreEntry::hasEtag(ETag &etag) const
{
    if (const HttpReply *reply = getReply()) {
        etag = reply->header.getETag(Http::HdrType::ETAG);
        if (etag.str)
            return true;
    }
    return false;
}

bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}

bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}

/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}

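/* Matching example: for a stored ETag of W/"v2", a request with
 * If-None-Match: "v1", W/"v2" matches only when weak comparison is allowed
 * (HEAD or full-body GET); If-None-Match: * matches any entity that has an
 * ETag at all.
 */
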
Store::Disk &
StoreEntry::disk() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}

/*
 * return true if the entry is in a state where
 * it can accept more data (ie with write() method)
 */
bool
StoreEntry::isAccepting() const
{
    if (STORE_PENDING != store_status)
        return false;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return false;

    return true;
}

std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}

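/* Decoding sketch (hypothetical values): an entry printed as
 *   e:t3m7=sw1V/0x7f..*2
 * is transient slot 3 and shared-memory slot 7, STORE_OK ('s'), being
 * written to disk ('w' followed by the numeric swap_status; SWAPOUT_WRITING
 * is 1 going by the swapStatusStr order above), ENTRY_VALIDATED ('V'),
 * printed with its address and 2 locks.
 */
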
/* NullStoreEntry */

NullStoreEntry NullStoreEntry::_instance;

NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}

char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}

void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}

char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}