1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "CacheDigest.h"
36 #include "CacheManager.h"
37 #include "comm/Connection.h"
38 #include "ETag.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "globals.h"
42 #include "http.h"
43 #include "HttpReply.h"
44 #include "HttpRequest.h"
45 #include "mem_node.h"
46 #include "MemObject.h"
47 #include "mgr/Registration.h"
48 #include "mgr/StoreIoAction.h"
49 #include "profiler/Profiler.h"
50 #include "repl_modules.h"
51 #include "RequestFlags.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "StatCounters.h"
55 #include "stmem.h"
56 #include "Store.h"
57 #include "store_digest.h"
58 #include "store_key_md5.h"
59 #include "store_key_md5.h"
60 #include "store_log.h"
61 #include "store_rebuild.h"
62 #include "StoreClient.h"
63 #include "StoreIOState.h"
64 #include "StoreMeta.h"
65 #include "StrList.h"
66 #include "swap_log_op.h"
67 #include "SwapDir.h"
68 #include "tools.h"
69 #if USE_DELAY_POOLS
70 #include "DelayPools.h"
71 #endif
72 #if HAVE_LIMITS_H
73 #include <limits.h>
74 #endif
75
76 #include <stack>
77
78 #define REBUILD_TIMESTAMP_DELTA_MAX 2
79
80 #define STORE_IN_MEM_BUCKETS (229)
81
82 /** \todo Convert these string constants to generated enum string-arrays */
83
84 const char *memStatusStr[] = {
85 "NOT_IN_MEMORY",
86 "IN_MEMORY"
87 };
88
89 const char *pingStatusStr[] = {
90 "PING_NONE",
91 "PING_WAITING",
92 "PING_DONE"
93 };
94
95 const char *storeStatusStr[] = {
96 "STORE_OK",
97 "STORE_PENDING"
98 };
99
100 const char *swapStatusStr[] = {
101 "SWAPOUT_NONE",
102 "SWAPOUT_WRITING",
103 "SWAPOUT_DONE"
104 };
105
106 /*
107 * This defines a repl type
108 */
109
110 typedef struct _storerepl_entry storerepl_entry_t;
111
112 struct _storerepl_entry {
113 const char *typestr;
114 REMOVALPOLICYCREATE *create;
115 };
116
117 static storerepl_entry_t *storerepl_list = NULL;
118
119 /*
120 * local function prototypes
121 */
122 static int getKeyCounter(void);
123 static OBJH storeCheckCachableStats;
124 static EVH storeLateRelease;
125
126 /*
127 * local variables
128 */
129 static std::stack<StoreEntry*> LateReleaseStack;
130 MemAllocator *StoreEntry::pool = NULL;
131
132 StorePointer Store::CurrentRoot = NULL;
133
134 void
135 Store::Root(Store * aRoot)
136 {
137 CurrentRoot = aRoot;
138 }
139
140 void
141 Store::Root(StorePointer aRoot)
142 {
143 Root(aRoot.getRaw());
144 }
145
146 void
147 Store::Stats(StoreEntry * output)
148 {
149 assert (output);
150 Root().stat(*output);
151 }
152
153 void
154 Store::create()
155 {}
156
157 void
158 Store::diskFull()
159 {}
160
161 void
162 Store::sync()
163 {}
164
165 void
166 Store::unlink (StoreEntry &anEntry)
167 {
168 fatal("Store::unlink on invalid Store\n");
169 }
170
171 void *
172 StoreEntry::operator new (size_t bytecount)
173 {
174 assert (bytecount == sizeof (StoreEntry));
175
176 if (!pool) {
177 pool = memPoolCreate ("StoreEntry", bytecount);
178 pool->setChunkSize(2048 * 1024);
179 }
180
181 return pool->alloc();
182 }
183
184 void
185 StoreEntry::operator delete (void *address)
186 {
187 pool->freeOne(address);
188 }
189
190 void
191 StoreEntry::makePublic()
192 {
193 /* This object can be cached for a long time */
194
195 if (!EBIT_TEST(flags, RELEASE_REQUEST))
196 setPublicKey();
197 }
198
199 void
200 StoreEntry::makePrivate()
201 {
202 /* This object should never be cached at all */
203 expireNow();
204 releaseRequest(); /* delete object when not used */
205 }
206
207 void
208 StoreEntry::cacheNegatively()
209 {
210 /* This object may be negatively cached */
211 negativeCache();
212 makePublic();
213 }
214
215 size_t
216 StoreEntry::inUseCount()
217 {
218 if (!pool)
219 return 0;
220 return pool->getInUseCount();
221 }
222
223 const char *
224 StoreEntry::getMD5Text() const
225 {
226 return storeKeyText((const cache_key *)key);
227 }
228
229 #include "comm.h"
230
231 void
232 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
233 {
234 StoreEntry *anEntry = (StoreEntry *)theContext;
235 anEntry->delayAwareRead(aRead.conn,
236 aRead.buf,
237 aRead.len,
238 aRead.callback);
239 }
240
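/*
 * Schedule a read on conn, but only for as many bytes as this entry
 * currently wants. When the read-ahead gap or a delay pool allows no
 * data yet, the read is queued as a DeferredRead on the MemObject and
 * retried later via DeferReader.
 */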
241 void
242 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
243 {
244 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
245 /* sketch: readdeferer* = getdeferer.
246 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
247 */
248
249 if (amountToRead == 0) {
250 assert (mem_obj);
251 /* read ahead limit */
252 /* Perhaps these two calls should both live in MemObject */
253 #if USE_DELAY_POOLS
254 if (!mem_obj->readAheadPolicyCanRead()) {
255 #endif
256 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
257 return;
258 #if USE_DELAY_POOLS
259 }
260
261 /* delay id limit */
262 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
263 return;
264
265 #endif
266
267 }
268
269 if (fd_table[conn->fd].closing()) {
270 // Readers must have closing callbacks if they want to be notified. No
271 // readers appeared to care around 2009/12/14 as they skipped reading
272 // for other reasons. Closing may already be true at the delayAwareRead
273 // call time or may happen while we wait after delayRead() above.
274 debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
275 callback);
276 return; // the read callback will never be called
277 }
278
279 comm_read(conn, buf, amountToRead, callback);
280 }
281
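/*
 * How many bytes (up to aRange.end) this entry is willing to accept
 * right now, honouring the read-ahead gap and, unless ignoreDelayPools
 * is set, any delay pool limits on the MemObject.
 */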
282 size_t
283 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
284 {
285 if (mem_obj == NULL)
286 return aRange.end;
287
288 #if URL_CHECKSUM_DEBUG
289
290 mem_obj->checkUrlChecksum();
291
292 #endif
293
294 if (!mem_obj->readAheadPolicyCanRead())
295 return 0;
296
297 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
298 }
299
300 bool
301 StoreEntry::checkDeferRead(int fd) const
302 {
303 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
304 }
305
306 void
307 StoreEntry::setNoDelay (bool const newValue)
308 {
309 if (mem_obj)
310 mem_obj->setNoDelay(newValue);
311 }
312
313 // XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
314 // open swapin file, aggressively trim memory, and ignore read-ahead gap.
315 // It does not mean we will read from disk exclusively (or at all!).
316 // XXX: May create STORE_DISK_CLIENT with no disk caching configured.
317 // XXX: Collapsed clients cannot predict their type.
318 store_client_t
319 StoreEntry::storeClientType() const
320 {
321 /* The needed offset isn't in memory
322 * XXX TODO: this is wrong for range requests
323 * as the needed offset may *not* be 0, AND
324 * offset 0 in the memory object is the HTTP headers.
325 */
326
327 assert(mem_obj);
328
329 if (mem_obj->inmem_lo)
330 return STORE_DISK_CLIENT;
331
332 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
333 /* I don't think we should be adding clients to aborted entries */
334 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
335 return STORE_MEM_CLIENT;
336 }
337
338 if (store_status == STORE_OK) {
339 /* the object has completed. */
340
341 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
342 if (swap_status == SWAPOUT_DONE) {
343 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
344 if (mem_obj->endOffset() == mem_obj->object_sz) {
345 /* hot object fully swapped in (XXX: or swapped out?) */
346 return STORE_MEM_CLIENT;
347 }
348 } else {
349 /* Memory-only, or currently being swapped out */
350 return STORE_MEM_CLIENT;
351 }
352 }
353 return STORE_DISK_CLIENT;
354 }
355
356 /* here and past, entry is STORE_PENDING */
357 /*
358 * If this is the first client, let it be the mem client
359 */
360 if (mem_obj->nclients == 1)
361 return STORE_MEM_CLIENT;
362
363 /*
364 * If there is no disk file to open yet, we must make this a
365 * mem client. If we can't open the swapin file before writing
366 * to the client, there is no guarantee that we will be able
367 * to open it later when we really need it.
368 */
369 if (swap_status == SWAPOUT_NONE)
370 return STORE_MEM_CLIENT;
371
372 /*
373 * otherwise, make subsequent clients read from disk so they
374 * can not delay the first, and vice-versa.
375 */
376 return STORE_DISK_CLIENT;
377 }
378
379 StoreEntry::StoreEntry() :
380 mem_obj(NULL),
381 timestamp(-1),
382 lastref(-1),
383 expires(-1),
384 lastmod(-1),
385 swap_file_sz(0),
386 refcount(0),
387 flags(0),
388 swap_filen(-1),
389 swap_dirn(-1),
390 mem_status(NOT_IN_MEMORY),
391 ping_status(PING_NONE),
392 store_status(STORE_PENDING),
393 swap_status(SWAPOUT_NONE),
394 lock_count(0)
395 {
396 debugs(20, 5, "StoreEntry constructed, this=" << this);
397 }
398
399 StoreEntry::~StoreEntry()
400 {
401 debugs(20, 5, "StoreEntry destructed, this=" << this);
402 }
403
404 #if USE_ADAPTATION
405 void
406 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
407 {
408 if (!deferredProducer)
409 deferredProducer = producer;
410 else
411 debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
412 *deferredProducer << ", requested call: " << *producer);
413 }
414
415 void
416 StoreEntry::kickProducer()
417 {
418 if (deferredProducer != NULL) {
419 ScheduleCallHere(deferredProducer);
420 deferredProducer = NULL;
421 }
422 }
423 #endif
424
425 void
426 StoreEntry::destroyMemObject()
427 {
428 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
429
430 if (MemObject *mem = mem_obj) {
431 // Store::Root() is FATALly missing during shutdown
432 if (mem->xitTable.index >= 0 && !shutting_down)
433 Store::Root().transientsDisconnect(*mem);
434 if (mem->memCache.index >= 0 && !shutting_down)
435 Store::Root().memoryDisconnect(*this);
436
437 setMemStatus(NOT_IN_MEMORY);
438 mem_obj = NULL;
439 delete mem;
440 }
441 }
442
443 void
444 destroyStoreEntry(void *data)
445 {
446 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
447 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
448 assert(e != NULL);
449
450 if (e == NullStoreEntry::getInstance())
451 return;
452
453 // Store::Root() is FATALly missing during shutdown
454 if (e->swap_filen >= 0 && !shutting_down) {
455 SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
456 sd.disconnect(*e);
457 }
458
459 e->destroyMemObject();
460
461 e->hashDelete();
462
463 assert(e->key == NULL);
464
465 delete e;
466 }
467
468 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
469
470 void
471 StoreEntry::hashInsert(const cache_key * someKey)
472 {
473 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
474 key = storeKeyDup(someKey);
475 hash_join(store_table, this);
476 }
477
478 void
479 StoreEntry::hashDelete()
480 {
481 if (key) { // some test cases do not create keys and do not hashInsert()
482 hash_remove_link(store_table, this);
483 storeKeyFree((const cache_key *)key);
484 key = NULL;
485 }
486 }
487
488 /* -------------------------------------------------------------------------- */
489
490 /* get rid of memory copy of the object */
491 void
492 StoreEntry::purgeMem()
493 {
494 if (mem_obj == NULL)
495 return;
496
497 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
498
499 Store::Root().memoryUnlink(*this);
500
501 if (swap_status != SWAPOUT_DONE)
502 release();
503 }
504
505 void
506 StoreEntry::lock(const char *context)
507 {
508 ++lock_count;
509 debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
510 }
511
512 void
513 StoreEntry::touch()
514 {
515 lastref = squid_curtime;
516 Store::Root().reference(*this);
517 }
518
519 void
520 StoreEntry::setReleaseFlag()
521 {
522 if (EBIT_TEST(flags, RELEASE_REQUEST))
523 return;
524
525 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
526
527 EBIT_SET(flags, RELEASE_REQUEST);
528
529 Store::Root().markForUnlink(*this);
530 }
531
532 void
533 StoreEntry::releaseRequest()
534 {
535 if (EBIT_TEST(flags, RELEASE_REQUEST))
536 return;
537
538 setReleaseFlag(); // makes validToSend() false, preventing future hits
539
540 setPrivateKey();
541 }
542
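/*
 * Drop one reference. When the last lock goes away the entry is either
 * released (if marked RELEASE_REQUEST) or handed to the store as an
 * idle entry, which may delete it. Returns the remaining lock count.
 */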
543 int
544 StoreEntry::unlock(const char *context)
545 {
546 debugs(20, 3, (context ? context : "somebody") <<
547 " unlocking key " << getMD5Text() << ' ' << *this);
548 assert(lock_count > 0);
549 --lock_count;
550
551 if (lock_count)
552 return (int) lock_count;
553
554 if (store_status == STORE_PENDING)
555 setReleaseFlag();
556
557 assert(storePendingNClients(this) == 0);
558
559 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
560 this->release();
561 return 0;
562 }
563
564 if (EBIT_TEST(flags, KEY_PRIVATE))
565 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
566
567 Store::Root().handleIdleEntry(*this); // may delete us
568 return 0;
569 }
570
571 void
572 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
573 {
574 assert (aClient);
575 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
576
577 if (!result)
578 aClient->created (NullStoreEntry::getInstance());
579 else
580 aClient->created (result);
581 }
582
583 void
584 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
585 {
586 assert (aClient);
587 StoreEntry *result = storeGetPublicByRequest (request);
588
589 if (!result)
590 result = NullStoreEntry::getInstance();
591
592 aClient->created (result);
593 }
594
595 void
596 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
597 {
598 assert (aClient);
599 StoreEntry *result = storeGetPublic (uri, method);
600
601 if (!result)
602 result = NullStoreEntry::getInstance();
603
604 aClient->created (result);
605 }
606
607 StoreEntry *
608 storeGetPublic(const char *uri, const HttpRequestMethod& method)
609 {
610 return Store::Root().get(storeKeyPublic(uri, method));
611 }
612
613 StoreEntry *
614 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
615 {
616 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
617 }
618
619 StoreEntry *
620 storeGetPublicByRequest(HttpRequest * req)
621 {
622 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
623
624 if (e == NULL && req->method == Http::METHOD_HEAD)
625 /* We can generate a HEAD reply from a cached GET object */
626 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
627
628 return e;
629 }
630
631 static int
632 getKeyCounter(void)
633 {
634 static int key_counter = 0;
635
636 if (++key_counter < 0)
637 key_counter = 1;
638
639 return key_counter;
640 }
641
642 /* RBC 20050104 AFAICT this should become simpler:
643 * rather than reinserting with a special key it should be marked
644 * as 'released' and then cleaned up when refcounting indicates.
645 * the StoreHashIndex could well implement its 'released' in the
646 * current manner.
647 * Also, clean log writing should skip over ia,t
648 * Otherwise, we need a 'remove from the index but not the store'
649 * concept.
650 */
651 void
652 StoreEntry::setPrivateKey()
653 {
654 const cache_key *newkey;
655
656 if (key && EBIT_TEST(flags, KEY_PRIVATE))
657 return; /* is already private */
658
659 if (key) {
660 setReleaseFlag(); // will markForUnlink(); all caches/workers will know
661
662 // TODO: move into SwapDir::markForUnlink() already called by Root()
663 if (swap_filen > -1)
664 storeDirSwapLog(this, SWAP_LOG_DEL);
665
666 hashDelete();
667 }
668
669 if (mem_obj && mem_obj->hasUris()) {
670 mem_obj->id = getKeyCounter();
671 newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
672 } else {
673 newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
674 }
675
676 assert(hash_lookup(store_table, newkey) == NULL);
677 EBIT_SET(flags, KEY_PRIVATE);
678 hashInsert(newkey);
679 }
680
681 void
682 StoreEntry::setPublicKey()
683 {
684 const cache_key *newkey;
685
686 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
687 return; /* is already public */
688
689 assert(mem_obj);
690
691 /*
692 * We can't make RELEASE_REQUEST objects public. Depending on
693 * when RELEASE_REQUEST gets set, we might not be swapping out
694 * the object. If we're not swapping out, then subsequent
695 * store clients won't be able to access object data which has
696 * been freed from memory.
697 *
698 * If RELEASE_REQUEST is set, setPublicKey() should not be called.
699 */
700 #if MORE_DEBUG_OUTPUT
701
702 if (EBIT_TEST(flags, RELEASE_REQUEST))
703 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
704
705 #endif
706
707 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
708
709 if (mem_obj->request) {
710 HttpRequest *request = mem_obj->request;
711
712 if (!mem_obj->vary_headers) {
713 /* First handle the case where the object no longer varies */
714 safe_free(request->vary_headers);
715 } else {
716 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
717 /* Oops.. the variance has changed. Kill the base object
718 * to record the new variance key
719 */
720 safe_free(request->vary_headers); /* free old "bad" variance key */
721 if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
722 pe->release();
723 }
724
725 /* Make sure the request knows the variance status */
726 if (!request->vary_headers) {
727 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
728
729 if (vary)
730 request->vary_headers = xstrdup(vary);
731 }
732 }
733
734 // TODO: storeGetPublic() calls below may create unlocked entries.
735 // We should add/use storeHas() API or lock/unlock those entries.
736 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
737 /* Create "vary" base object */
738 String vary;
739 StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
740 /* We are allowed to do this typecast */
741 HttpReply *rep = new HttpReply;
742 rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
743 vary = mem_obj->getReply()->header.getList(HDR_VARY);
744
745 if (vary.size()) {
746 /* Again, we own this structure layout */
747 rep->header.putStr(HDR_VARY, vary.termedBuf());
748 vary.clean();
749 }
750
751 #if X_ACCELERATOR_VARY
752 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
753
754 if (vary.size() > 0) {
755 /* Again, we own this structure layout */
756 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
757 vary.clean();
758 }
759
760 #endif
761 pe->replaceHttpReply(rep, false); // no write until key is public
762
763 pe->timestampsSet();
764
765 pe->makePublic();
766
767 pe->startWriting(); // after makePublic()
768
769 pe->complete();
770
771 pe->unlock("StoreEntry::setPublicKey+Vary");
772 }
773
774 newkey = storeKeyPublicByRequest(mem_obj->request);
775 } else
776 newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
777
778 if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
779 debugs(20, 3, "Making old " << *e2 << " private.");
780 e2->setPrivateKey();
781 e2->release();
782
783 if (mem_obj->request)
784 newkey = storeKeyPublicByRequest(mem_obj->request);
785 else
786 newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
787 }
788
789 if (key)
790 hashDelete();
791
792 EBIT_CLR(flags, KEY_PRIVATE);
793
794 hashInsert(newkey);
795
796 if (swap_filen > -1)
797 storeDirSwapLog(this, SWAP_LOG_ADD);
798 }
799
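/*
 * Build a fresh, unlocked StoreEntry with a MemObject and URIs but no
 * cache key yet; storeCreateEntry() adds the lock and the key.
 */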
800 StoreEntry *
801 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
802 {
803 StoreEntry *e = NULL;
804 debugs(20, 3, "storeCreateEntry: '" << url << "'");
805
806 e = new StoreEntry();
807 e->makeMemObject();
808 e->mem_obj->setUris(url, log_url, method);
809
810 if (flags.cachable) {
811 EBIT_CLR(e->flags, RELEASE_REQUEST);
812 } else {
813 e->releaseRequest();
814 }
815
816 e->store_status = STORE_PENDING;
817 e->refcount = 0;
818 e->lastref = squid_curtime;
819 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
820 e->ping_status = PING_NONE;
821 EBIT_SET(e->flags, ENTRY_VALIDATED);
822 return e;
823 }
824
825 StoreEntry *
826 storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
827 {
828 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
829 e->lock("storeCreateEntry");
830
831 if (neighbors_do_private_keys || !flags.hierarchical)
832 e->setPrivateKey();
833 else
834 e->setPublicKey();
835
836 return e;
837 }
838
839 /* Mark object as expired */
840 void
841 StoreEntry::expireNow()
842 {
843 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
844 expires = squid_curtime;
845 }
846
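/*
 * Append writeBuffer to the in-memory object. The caller supplies a
 * content offset; we shift it past the stored reply headers, make room
 * in memory, and wake waiting clients unless DELAY_SENDING is set.
 */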
847 void
848 StoreEntry::write (StoreIOBuffer writeBuffer)
849 {
850 assert(mem_obj != NULL);
851 /* This assert will change when we teach the store to update */
852 PROF_start(StoreEntry_write);
853 assert(store_status == STORE_PENDING);
854
855 // XXX: caller uses content offset, but we also store headers
856 if (const HttpReply *reply = mem_obj->getReply())
857 writeBuffer.offset += reply->hdr_sz;
858
859 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
860 PROF_stop(StoreEntry_write);
861 storeGetMemSpace(writeBuffer.length);
862 mem_obj->write(writeBuffer);
863
864 if (!EBIT_TEST(flags, DELAY_SENDING))
865 invokeHandlers();
866 }
867
868 /* Append incoming data from a primary server to an entry. */
869 void
870 StoreEntry::append(char const *buf, int len)
871 {
872 assert(mem_obj != NULL);
873 assert(len >= 0);
874 assert(store_status == STORE_PENDING);
875
876 StoreIOBuffer tempBuffer;
877 tempBuffer.data = (char *)buf;
878 tempBuffer.length = len;
879 /*
880 * XXX sigh, offset might be < 0 here, but it gets "corrected"
881 * later. This offset crap is such a mess.
882 */
883 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
884 write(tempBuffer);
885 }
886
887 void
888 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
889 {
890 va_list args;
891 va_start(args, fmt);
892
893 storeAppendVPrintf(e, fmt, args);
894 va_end(args);
895 }
896
897 /* used by storeAppendPrintf and Packer */
898 void
899 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
900 {
901 LOCAL_ARRAY(char, buf, 4096);
902 buf[0] = '\0';
903 vsnprintf(buf, 4096, fmt, vargs);
904 e->append(buf, strlen(buf));
905 }
906
907 struct _store_check_cachable_hist {
908
909 struct {
910 int non_get;
911 int not_entry_cachable;
912 int wrong_content_length;
913 int negative_cached;
914 int too_big;
915 int too_small;
916 int private_key;
917 int too_many_open_files;
918 int too_many_open_fds;
919 } no;
920
921 struct {
922 int Default;
923 } yes;
924 } store_check_cachable_hist;
925
926 int
927 storeTooManyDiskFilesOpen(void)
928 {
929 if (Config.max_open_disk_fds == 0)
930 return 0;
931
932 if (store_open_disk_fd > Config.max_open_disk_fds)
933 return 1;
934
935 return 0;
936 }
937
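/*
 * Returns 1 when a completed object, or one with a known content
 * length, is below Config.Store.minObjectSize; ENTRY_SPECIAL entries
 * are never considered too small.
 */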
938 int
939 StoreEntry::checkTooSmall()
940 {
941 if (EBIT_TEST(flags, ENTRY_SPECIAL))
942 return 0;
943
944 if (STORE_OK == store_status)
945 if (mem_obj->object_sz < 0 ||
946 mem_obj->object_sz < Config.Store.minObjectSize)
947 return 1;
948 if (getReply()->content_length > -1)
949 if (getReply()->content_length < Config.Store.minObjectSize)
950 return 1;
951 return 0;
952 }
953
954 // TODO: remove checks already performed by swapoutPossible()
955 // TODO: move "too many open..." checks outside -- we are called too early/late
956 int
957 StoreEntry::checkCachable()
958 {
959 #if CACHE_ALL_METHODS
960
961 if (mem_obj->method != Http::METHOD_GET) {
962 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
963 ++store_check_cachable_hist.no.non_get;
964 } else
965 #endif
966 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
967 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
968 ++store_check_cachable_hist.no.wrong_content_length;
969 } else if (EBIT_TEST(flags, RELEASE_REQUEST)) {
970 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
971 ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
972 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
973 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
974 ++store_check_cachable_hist.no.negative_cached;
975 return 0; /* avoid release call below */
976 } else if ((getReply()->content_length > 0 &&
977 getReply()->content_length > store_maxobjsize) ||
978 mem_obj->endOffset() > store_maxobjsize) {
979 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
980 ++store_check_cachable_hist.no.too_big;
981 } else if (checkTooSmall()) {
982 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
983 ++store_check_cachable_hist.no.too_small;
984 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
985 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
986 ++store_check_cachable_hist.no.private_key;
987 } else if (swap_status != SWAPOUT_NONE) {
988 /*
989 * here we check the swap_status because the remaining
990 * cases are relevant only if we haven't started swapping
991 * out the object yet.
992 */
993 return 1;
994 } else if (storeTooManyDiskFilesOpen()) {
995 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
996 ++store_check_cachable_hist.no.too_many_open_files;
997 } else if (fdNFree() < RESERVED_FD) {
998 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
999 ++store_check_cachable_hist.no.too_many_open_fds;
1000 } else {
1001 ++store_check_cachable_hist.yes.Default;
1002 return 1;
1003 }
1004
1005 releaseRequest();
1006 return 0;
1007 }
1008
1009 void
1010 storeCheckCachableStats(StoreEntry *sentry)
1011 {
1012 storeAppendPrintf(sentry, "Category\t Count\n");
1013
1014 #if CACHE_ALL_METHODS
1015
1016 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1017 store_check_cachable_hist.no.non_get);
1018 #endif
1019
1020 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1021 store_check_cachable_hist.no.not_entry_cachable);
1022 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1023 store_check_cachable_hist.no.wrong_content_length);
1024 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1025 store_check_cachable_hist.no.negative_cached);
1026 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1027 store_check_cachable_hist.no.too_big);
1028 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1029 store_check_cachable_hist.no.too_small);
1030 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1031 store_check_cachable_hist.no.private_key);
1032 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1033 store_check_cachable_hist.no.too_many_open_files);
1034 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1035 store_check_cachable_hist.no.too_many_open_fds);
1036 storeAppendPrintf(sentry, "yes.default\t%d\n",
1037 store_check_cachable_hist.yes.Default);
1038 }
1039
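/*
 * Mark the end of the server-side data stream: record the final object
 * size, switch to STORE_OK, flag and release entries whose length does
 * not match the reply, and notify waiting clients.
 */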
1040 void
1041 StoreEntry::complete()
1042 {
1043 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1044
1045 if (store_status != STORE_PENDING) {
1046 /*
1047 * if we're not STORE_PENDING, then probably we got aborted
1048 * and there should be NO clients on this entry
1049 */
1050 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1051 assert(mem_obj->nclients == 0);
1052 return;
1053 }
1054
1055 /* This is suspect: mem_obj offsets include the headers. Do we adjust for that
1056 * in use of object_sz?
1057 */
1058 mem_obj->object_sz = mem_obj->endOffset();
1059
1060 store_status = STORE_OK;
1061
1062 assert(mem_status == NOT_IN_MEMORY);
1063
1064 if (!validLength()) {
1065 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1066 releaseRequest();
1067 }
1068
1069 #if USE_CACHE_DIGESTS
1070 if (mem_obj->request)
1071 mem_obj->request->hier.store_complete_stop = current_time;
1072
1073 #endif
1074 /*
1075 * We used to call invokeHandlers, then storeSwapOut. However,
1076 * Madhukar Reddy <myreddy@persistence.com> reported that
1077 * responses without content length would sometimes get released
1078 * in client_side, thinking that the response is incomplete.
1079 */
1080 invokeHandlers();
1081 }
1082
1083 /*
1084 * Someone wants to abort this transfer. Set the reason in the
1085 * request structure, call the server-side callback and mark the
1086 * entry for releasing
1087 */
1088 void
1089 StoreEntry::abort()
1090 {
1091 ++statCounter.aborted_requests;
1092 assert(store_status == STORE_PENDING);
1093 assert(mem_obj != NULL);
1094 debugs(20, 6, "storeAbort: " << getMD5Text());
1095
1096 lock("StoreEntry::abort"); /* lock while aborting */
1097 negativeCache();
1098
1099 releaseRequest();
1100
1101 EBIT_SET(flags, ENTRY_ABORTED);
1102
1103 setMemStatus(NOT_IN_MEMORY);
1104
1105 store_status = STORE_OK;
1106
1107 /* Notify the server side */
1108
1109 /*
1110 * DPW 2007-05-07
1111 * Should we check abort.data for validity?
1112 */
1113 if (mem_obj->abort.callback) {
1114 if (!cbdataReferenceValid(mem_obj->abort.data))
1115 debugs(20, DBG_IMPORTANT, HERE << "queueing event when abort.data is not valid");
1116 eventAdd("mem_obj->abort.callback",
1117 mem_obj->abort.callback,
1118 mem_obj->abort.data,
1119 0.0,
1120 true);
1121 unregisterAbort();
1122 }
1123
1124 /* XXX Should we reverse these two, so that there is no
1125 * unneeded disk swapping triggered?
1126 */
1127 /* Notify the client side */
1128 invokeHandlers();
1129
1130 // abort swap out, invalidating what was created so far (release follows)
1131 swapOutFileClose(StoreIOState::writerGone);
1132
1133 unlock("StoreEntry::abort"); /* unlock */
1134 }
1135
1136 /**
1137 * Clear memory storage to accommodate the given object length
1138 */
1139 void
1140 storeGetMemSpace(int size)
1141 {
1142 PROF_start(storeGetMemSpace);
1143 StoreEntry *e = NULL;
1144 int released = 0;
1145 static time_t last_check = 0;
1146 size_t pages_needed;
1147 RemovalPurgeWalker *walker;
1148
1149 if (squid_curtime == last_check) {
1150 PROF_stop(storeGetMemSpace);
1151 return;
1152 }
1153
1154 last_check = squid_curtime;
1155
1156 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1157
1158 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1159 PROF_stop(storeGetMemSpace);
1160 return;
1161 }
1162
1163 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1164 " pages");
1165
1166 /* XXX what to set as max_scan here? */
1167 walker = mem_policy->PurgeInit(mem_policy, 100000);
1168
1169 while ((e = walker->Next(walker))) {
1170 e->purgeMem();
1171 ++released;
1172
1173 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1174 break;
1175 }
1176
1177 walker->Done(walker);
1178 debugs(20, 3, "storeGetMemSpace stats:");
1179 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1180 debugs(20, 3, " " << std::setw(6) << released << " were released");
1181 PROF_stop(storeGetMemSpace);
1182 }
1183
1184 /* thunk through to Store::Root().maintain(). Note that this would be better still
1185 * if registered against the root store itself, but that requires more complex
1186 * update logic - bigger fish to fry first. Long term, each store will
1187 * self-register when it becomes active.
1188 */
1189 void
1190 Store::Maintain(void *notused)
1191 {
1192 Store::Root().maintain();
1193
1194 /* Reregister a maintain event .. */
1195 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1196
1197 }
1198
1199 /* The maximum number of objects to scan when maintaining storage space */
1200 #define MAINTAIN_MAX_SCAN 1024
1201 #define MAINTAIN_MAX_REMOVE 64
1202
1203 /*
1204 * This routine is to be called by the main loop in main.c.
1205 * It removes expired objects from only one bucket each time it is called.
1206 *
1207 * This should get called 1/s from main().
1208 */
1209 void
1210 StoreController::maintain()
1211 {
1212 static time_t last_warn_time = 0;
1213
1214 PROF_start(storeMaintainSwapSpace);
1215 swapDir->maintain();
1216
1217 /* this should be emitted by the oversize dir, not globally */
1218
1219 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1220 if (squid_curtime - last_warn_time > 10) {
1221 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1222 << Store::Root().currentSize() / 1024.0 << " KB > "
1223 << (Store::Root().maxSize() >> 10) << " KB");
1224 last_warn_time = squid_curtime;
1225 }
1226 }
1227
1228 PROF_stop(storeMaintainSwapSpace);
1229 }
1230
1231 /* release an object from a cache */
1232 void
1233 StoreEntry::release()
1234 {
1235 PROF_start(storeRelease);
1236 debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
1237 /* If, for any reason we can't discard this object because of an
1238 * outstanding request, mark it for pending release */
1239
1240 if (locked()) {
1241 expireNow();
1242 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1243 releaseRequest();
1244 PROF_stop(storeRelease);
1245 return;
1246 }
1247
1248 Store::Root().memoryUnlink(*this);
1249
1250 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1251 setPrivateKey();
1252
1253 if (swap_filen > -1) {
1254 // lock the entry until rebuilding is done
1255 lock("storeLateRelease");
1256 setReleaseFlag();
1257 LateReleaseStack.push(this);
1258 } else {
1259 destroyStoreEntry(static_cast<hash_link *>(this));
1260 // "this" is no longer valid
1261 }
1262
1263 PROF_stop(storeRelease);
1264 return;
1265 }
1266
1267 storeLog(STORE_LOG_RELEASE, this);
1268
1269 if (swap_filen > -1) {
1270 // log before unlink() below clears swap_filen
1271 if (!EBIT_TEST(flags, KEY_PRIVATE))
1272 storeDirSwapLog(this, SWAP_LOG_DEL);
1273
1274 unlink();
1275 }
1276
1277 destroyStoreEntry(static_cast<hash_link *>(this));
1278 PROF_stop(storeRelease);
1279 }
1280
1281 static void
1282 storeLateRelease(void *unused)
1283 {
1284 StoreEntry *e;
1285 static int n = 0;
1286
1287 if (StoreController::store_dirs_rebuilding) {
1288 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1289 return;
1290 }
1291
1292 // TODO: this works but looks inelegant.
1293 for (int i = 0; i < 10; ++i) {
1294 if (LateReleaseStack.empty()) {
1295 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1296 return;
1297 } else {
1298 e = LateReleaseStack.top();
1299 LateReleaseStack.pop();
1300 }
1301
1302 e->unlock("storeLateRelease");
1303 ++n;
1304 }
1305
1306 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1307 }
1308
1309 /* return 1 if a store entry is locked */
1310 int
1311 StoreEntry::locked() const
1312 {
1313 if (lock_count)
1314 return 1;
1315
1316 /*
1317 * SPECIAL, PUBLIC entries should be "locked";
1318 * XXX: Their owner should lock them then instead of relying on this hack.
1319 */
1320 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1321 if (!EBIT_TEST(flags, KEY_PRIVATE))
1322 return 1;
1323
1324 return 0;
1325 }
1326
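/*
 * Check that the bytes stored match hdr_sz + Content-Length. Replies
 * without a Content-Length or header size, HEAD responses, 204s and
 * 304s are always considered valid.
 */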
1327 bool
1328 StoreEntry::validLength() const
1329 {
1330 int64_t diff;
1331 const HttpReply *reply;
1332 assert(mem_obj != NULL);
1333 reply = getReply();
1334 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1335 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1336 objectLen());
1337 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1338 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1339
1340 if (reply->content_length < 0) {
1341 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1342 return 1;
1343 }
1344
1345 if (reply->hdr_sz == 0) {
1346 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1347 return 1;
1348 }
1349
1350 if (mem_obj->method == Http::METHOD_HEAD) {
1351 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1352 return 1;
1353 }
1354
1355 if (reply->sline.status() == Http::scNotModified)
1356 return 1;
1357
1358 if (reply->sline.status() == Http::scNoContent)
1359 return 1;
1360
1361 diff = reply->hdr_sz + reply->content_length - objectLen();
1362
1363 if (diff == 0)
1364 return 1;
1365
1366 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1367
1368 return 0;
1369 }
1370
1371 static void
1372 storeRegisterWithCacheManager(void)
1373 {
1374 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1375 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1376 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1377 storeCheckCachableStats, 0, 1);
1378 }
1379
1380 void
1381 storeInit(void)
1382 {
1383 storeKeyInit();
1384 mem_policy = createRemovalPolicy(Config.memPolicy);
1385 storeDigestInit();
1386 storeLogOpen();
1387 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1388 Store::Root().init();
1389 storeRebuildStart();
1390
1391 storeRegisterWithCacheManager();
1392 }
1393
1394 void
1395 storeConfigure(void)
1396 {
1397 store_swap_high = (long) (((float) Store::Root().maxSize() *
1398 (float) Config.Swap.highWaterMark) / (float) 100);
1399 store_swap_low = (long) (((float) Store::Root().maxSize() *
1400 (float) Config.Swap.lowWaterMark) / (float) 100);
1401 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1402 }
1403
1404 bool
1405 StoreEntry::memoryCachable() const
1406 {
1407 if (mem_obj == NULL)
1408 return 0;
1409
1410 if (mem_obj->data_hdr.size() == 0)
1411 return 0;
1412
1413 if (mem_obj->inmem_lo != 0)
1414 return 0;
1415
1416 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1417 return 0;
1418
1419 return 1;
1420 }
1421
1422 int
1423 StoreEntry::checkNegativeHit() const
1424 {
1425 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1426 return 0;
1427
1428 if (expires <= squid_curtime)
1429 return 0;
1430
1431 if (store_status != STORE_OK)
1432 return 0;
1433
1434 return 1;
1435 }
1436
1437 /**
1438 * Set object for negative caching.
1439 * Preserves any expiry information given by the server.
1440 * In the absence of proper expiry info it will be set to expire immediately,
1441 * or, with HTTP violations enabled, the configured negative TTL is observed.
1442 */
1443 void
1444 StoreEntry::negativeCache()
1445 {
1446 // XXX: should make the default for expires 0 instead of -1
1447 // so we can distinguish "Expires: -1" from nothing.
1448 if (expires <= 0)
1449 #if USE_HTTP_VIOLATIONS
1450 expires = squid_curtime + Config.negativeTtl;
1451 #else
1452 expires = squid_curtime;
1453 #endif
1454 EBIT_SET(flags, ENTRY_NEGCACHED);
1455 }
1456
1457 void
1458 storeFreeMemory(void)
1459 {
1460 Store::Root(NULL);
1461 #if USE_CACHE_DIGESTS
1462
1463 if (store_digest)
1464 cacheDigestDestroy(store_digest);
1465
1466 #endif
1467
1468 store_digest = NULL;
1469 }
1470
1471 int
1472 expiresMoreThan(time_t expires, time_t when)
1473 {
1474 if (expires < 0) /* No Expires given */
1475 return 1;
1476
1477 return (expires > (squid_curtime + when));
1478 }
1479
1480 int
1481 StoreEntry::validToSend() const
1482 {
1483 if (EBIT_TEST(flags, RELEASE_REQUEST))
1484 return 0;
1485
1486 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1487 if (expires <= squid_curtime)
1488 return 0;
1489
1490 if (EBIT_TEST(flags, ENTRY_ABORTED))
1491 return 0;
1492
1493 // now check that the entry has a cache backing or is collapsed
1494 if (swap_filen > -1) // backed by a disk cache
1495 return 1;
1496
1497 if (swappingOut()) // will be backed by a disk cache
1498 return 1;
1499
1500 if (!mem_obj) // not backed by a memory cache and not collapsed
1501 return 0;
1502
1503 if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
1504 return 0;
1505
1506 // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
1507 // disk cache backing so we should not rely on the store cache at all. This
1508 // is wrong for range requests that could feed off nibbled memory (XXX).
1509 if (mem_obj->inmem_lo) // in local memory cache, but got nibbled at
1510 return 0;
1511
1512 return 1;
1513 }
1514
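/*
 * Derive timestamp, expires and lastmod from the current reply,
 * following RFC 2616 section 13.2.3: sanity-check the origin Date,
 * compensate with the Age header and the measured request delay, and
 * offset Expires by the same corrected date.
 */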
1515 void
1516 StoreEntry::timestampsSet()
1517 {
1518 const HttpReply *reply = getReply();
1519 time_t served_date = reply->date;
1520 int age = reply->header.getInt(HDR_AGE);
1521 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1522 /* make sure that 0 <= served_date <= squid_curtime */
1523
1524 if (served_date < 0 || served_date > squid_curtime)
1525 served_date = squid_curtime;
1526
1527 /* Bug 1791:
1528 * If the returned Date: is more than 24 hours older than
1529 * the squid_curtime, then one of us needs to use NTP to set our
1530 * clock. We'll pretend that our clock is right.
1531 */
1532 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1533 served_date = squid_curtime;
1534
1535 /*
1536 * Compensate with Age header if origin server clock is ahead
1537 * of us and there is a cache in between us and the origin
1538 * server. But DONT compensate if the age value is larger than
1539 * squid_curtime because it results in a negative served_date.
1540 */
1541 if (age > squid_curtime - served_date)
1542 if (squid_curtime > age)
1543 served_date = squid_curtime - age;
1544
1545 // compensate for Squid-to-server and server-to-Squid delays
1546 if (mem_obj && mem_obj->request) {
1547 const time_t request_sent =
1548 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1549 if (0 < request_sent && request_sent < squid_curtime)
1550 served_date -= (squid_curtime - request_sent);
1551 }
1552
1553 if (reply->expires > 0 && reply->date > -1)
1554 expires = served_date + (reply->expires - reply->date);
1555 else
1556 expires = reply->expires;
1557
1558 lastmod = reply->last_modified;
1559
1560 timestamp = served_date;
1561 }
1562
1563 void
1564 StoreEntry::registerAbort(STABH * cb, void *data)
1565 {
1566 assert(mem_obj);
1567 assert(mem_obj->abort.callback == NULL);
1568 mem_obj->abort.callback = cb;
1569 mem_obj->abort.data = cbdataReference(data);
1570 }
1571
1572 void
1573 StoreEntry::unregisterAbort()
1574 {
1575 assert(mem_obj);
1576 if (mem_obj->abort.callback) {
1577 mem_obj->abort.callback = NULL;
1578 cbdataReferenceDone(mem_obj->abort.data);
1579 }
1580 }
1581
1582 void
1583 StoreEntry::dump(int l) const
1584 {
1585 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1586 debugs(20, l, "StoreEntry->next: " << next);
1587 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1588 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1589 debugs(20, l, "StoreEntry->lastref: " << lastref);
1590 debugs(20, l, "StoreEntry->expires: " << expires);
1591 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1592 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1593 debugs(20, l, "StoreEntry->refcount: " << refcount);
1594 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1595 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1596 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1597 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1598 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1599 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1600 debugs(20, l, "StoreEntry->store_status: " << store_status);
1601 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1602 }
1603
1604 /*
1605 * NOTE, this function assumes only two mem states
1606 */
1607 void
1608 StoreEntry::setMemStatus(mem_status_t new_status)
1609 {
1610 if (new_status == mem_status)
1611 return;
1612
1613 // are we using a shared memory cache?
1614 if (Config.memShared && IamWorkerProcess()) {
1615 // This method was designed to update replacement policy, not to
1616 // actually purge something from the memory cache (TODO: rename?).
1617 // Shared memory cache does not have a policy that needs updates.
1618 mem_status = new_status;
1619 return;
1620 }
1621
1622 assert(mem_obj != NULL);
1623
1624 if (new_status == IN_MEMORY) {
1625 assert(mem_obj->inmem_lo == 0);
1626
1627 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1628 debugs(20, 4, "not inserting special " << *this << " into policy");
1629 } else {
1630 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1631 debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
1632 }
1633
1634 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1635 } else {
1636 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1637 debugs(20, 4, "not removing special " << *this << " from policy");
1638 } else {
1639 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1640 debugs(20, 4, "removed " << *this);
1641 }
1642
1643 --hot_obj_count;
1644 }
1645
1646 mem_status = new_status;
1647 }
1648
1649 const char *
1650 StoreEntry::url() const
1651 {
1652 if (this == NULL)
1653 return "[null_entry]";
1654 else if (mem_obj == NULL)
1655 return "[null_mem_obj]";
1656 else
1657 return mem_obj->storeId();
1658 }
1659
1660 MemObject *
1661 StoreEntry::makeMemObject()
1662 {
1663 if (!mem_obj)
1664 mem_obj = new MemObject();
1665 return mem_obj;
1666 }
1667
1668 void
1669 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
1670 {
1671 makeMemObject();
1672 mem_obj->setUris(aUrl, aLogUrl, aMethod);
1673 }
1674
1675 /* this just sets DELAY_SENDING */
1676 void
1677 StoreEntry::buffer()
1678 {
1679 EBIT_SET(flags, DELAY_SENDING);
1680 }
1681
1682 /* this just clears DELAY_SENDING and Invokes the handlers */
1683 void
1684 StoreEntry::flush()
1685 {
1686 if (EBIT_TEST(flags, DELAY_SENDING)) {
1687 EBIT_CLR(flags, DELAY_SENDING);
1688 invokeHandlers();
1689 }
1690 }
1691
1692 int64_t
1693 StoreEntry::objectLen() const
1694 {
1695 assert(mem_obj != NULL);
1696 return mem_obj->object_sz;
1697 }
1698
1699 int64_t
1700 StoreEntry::contentLen() const
1701 {
1702 assert(mem_obj != NULL);
1703 assert(getReply() != NULL);
1704 return objectLen() - getReply()->hdr_sz;
1705 }
1706
1707 HttpReply const *
1708 StoreEntry::getReply () const
1709 {
1710 if (NULL == mem_obj)
1711 return NULL;
1712
1713 return mem_obj->getReply();
1714 }
1715
1716 void
1717 StoreEntry::reset()
1718 {
1719 assert (mem_obj);
1720 debugs(20, 3, "StoreEntry::reset: " << url());
1721 mem_obj->reset();
1722 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1723 rep->reset();
1724 expires = lastmod = timestamp = -1;
1725 }
1726
1727 /*
1728 * storeFsInit
1729 *
1730 * This routine calls the SETUP routine for each fs type.
1731 * I don't know where the best place for this is, and I'm not going to shuffle
1732 * around large chunks of code right now (that can be done once it's working).
1733 */
1734 void
1735 storeFsInit(void)
1736 {
1737 storeReplSetup();
1738 }
1739
1740 /*
1741 * called to add another store removal policy module
1742 */
1743 void
1744 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1745 {
1746 int i;
1747
1748 /* find the number of currently known repl types */
1749 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1750 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1751 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1752 return;
1753 }
1754 }
1755
1756 /* add the new type */
1757 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1758
1759 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1760
1761 storerepl_list[i].typestr = type;
1762
1763 storerepl_list[i].create = create;
1764 }
1765
1766 /*
1767 * Create a removal policy instance
1768 */
1769 RemovalPolicy *
1770 createRemovalPolicy(RemovalPolicySettings * settings)
1771 {
1772 storerepl_entry_t *r;
1773
1774 for (r = storerepl_list; r && r->typestr; ++r) {
1775 if (strcmp(r->typestr, settings->type) == 0)
1776 return r->create(settings->args);
1777 }
1778
1779 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1780 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1781 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1782 fatalf("ERROR: Unknown policy %s\n", settings->type);
1783 return NULL; /* NOTREACHED */
1784 }
1785
1786 #if 0
1787 void
1788 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1789 {
1790 if (e->swap_file_number == filn)
1791 return;
1792
1793 if (filn < 0) {
1794 assert(-1 == filn);
1795 storeDirMapBitReset(e->swap_file_number);
1796 storeDirLRUDelete(e);
1797 e->swap_file_number = -1;
1798 } else {
1799 assert(-1 == e->swap_file_number);
1800 storeDirMapBitSet(e->swap_file_number = filn);
1801 storeDirLRUAdd(e);
1802 }
1803 }
1804
1805 #endif
1806
1807 /*
1808 * Replace a store entry with
1809 * a new reply. This eats the reply.
1810 */
1811 void
1812 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1813 {
1814 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1815
1816 if (!mem_obj) {
1817 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1818 return;
1819 }
1820
1821 mem_obj->replaceHttpReply(rep);
1822
1823 if (andStartWriting)
1824 startWriting();
1825 }
1826
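/*
 * Pack the current reply headers, followed by any pre-built reply
 * body, into this (still empty) entry and clear ENTRY_FWD_HDR_WAIT.
 */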
1827 void
1828 StoreEntry::startWriting()
1829 {
1830 Packer p;
1831
1832 /* TODO: when we store headers separately, remove the header portion */
1833 /* TODO: mark the length of the headers ? */
1834 /* We ONLY want the headers */
1835 packerToStoreInit(&p, this);
1836
1837 assert (isEmpty());
1838 assert(mem_obj);
1839
1840 const HttpReply *rep = getReply();
1841 assert(rep);
1842
1843 rep->packHeadersInto(&p);
1844 mem_obj->markEndOfReplyHeaders();
1845 EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
1846
1847 rep->body.packInto(&p);
1848
1849 packerClean(&p);
1850 }
1851
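/*
 * Build and pack the swap meta TLV list for this entry and record its
 * size in mem_obj->swap_hdr_sz.
 */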
1852 char const *
1853 StoreEntry::getSerialisedMetaData()
1854 {
1855 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1856 int swap_hdr_sz;
1857 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1858 storeSwapTLVFree(tlv_list);
1859 assert (swap_hdr_sz >= 0);
1860 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1861 return result;
1862 }
1863
1864 void
1865 StoreEntry::trimMemory(const bool preserveSwappable)
1866 {
1867 /*
1868 * DPW 2007-05-09
1869 * Bug #1943. We must not let go any data for IN_MEMORY
1870 * objects. We have to wait until the mem_status changes.
1871 */
1872 if (mem_status == IN_MEMORY)
1873 return;
1874
1875 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1876 return; // cannot trim because we do not load them again
1877
1878 if (preserveSwappable)
1879 mem_obj->trimSwappable();
1880 else
1881 mem_obj->trimUnSwappable();
1882
1883 debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
1884 }
1885
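/*
 * If-Modified-Since check: compare the entry's last-modification time
 * (falling back to its timestamp) against request->ims, using the
 * optional If-Modified-Since length (imslen) to break ties.
 */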
1886 bool
1887 StoreEntry::modifiedSince(HttpRequest * request) const
1888 {
1889 int object_length;
1890 time_t mod_time = lastmod;
1891
1892 if (mod_time < 0)
1893 mod_time = timestamp;
1894
1895 debugs(88, 3, "modifiedSince: '" << url() << "'");
1896
1897 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1898
1899 if (mod_time < 0)
1900 return true;
1901
1902 /* Find size of the object */
1903 object_length = getReply()->content_length;
1904
1905 if (object_length < 0)
1906 object_length = contentLen();
1907
1908 if (mod_time > request->ims) {
1909 debugs(88, 3, "--> YES: entry newer than client");
1910 return true;
1911 } else if (mod_time < request->ims) {
1912 debugs(88, 3, "--> NO: entry older than client");
1913 return false;
1914 } else if (request->imslen < 0) {
1915 debugs(88, 3, "--> NO: same LMT, no client length");
1916 return false;
1917 } else if (request->imslen == object_length) {
1918 debugs(88, 3, "--> NO: same LMT, same length");
1919 return false;
1920 } else {
1921 debugs(88, 3, "--> YES: same LMT, different length");
1922 return true;
1923 }
1924 }
1925
1926 bool
1927 StoreEntry::hasEtag(ETag &etag) const
1928 {
1929 if (const HttpReply *reply = getReply()) {
1930 etag = reply->header.getETag(HDR_ETAG);
1931 if (etag.str)
1932 return true;
1933 }
1934 return false;
1935 }
1936
1937 bool
1938 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1939 {
1940 const String reqETags = request.header.getList(HDR_IF_MATCH);
1941 return hasOneOfEtags(reqETags, false);
1942 }
1943
1944 bool
1945 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1946 {
1947 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1948 // weak comparison is allowed only for HEAD or full-body GET requests
1949 const bool allowWeakMatch = !request.flags.isRanged &&
1950 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
1951 return hasOneOfEtags(reqETags, allowWeakMatch);
1952 }
1953
1954 /// whether at least one of the request ETags matches entity ETag
1955 bool
1956 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1957 {
1958 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1959 if (!repETag.str)
1960 return strListIsMember(&reqETags, "*", ',');
1961
1962 bool matched = false;
1963 const char *pos = NULL;
1964 const char *item;
1965 int ilen;
1966 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1967 if (!strncmp(item, "*", ilen))
1968 matched = true;
1969 else {
1970 String str;
1971 str.append(item, ilen);
1972 ETag reqETag;
1973 if (etagParseInit(&reqETag, str.termedBuf())) {
1974 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1975 etagIsStrongEqual(repETag, reqETag);
1976 }
1977 }
1978 }
1979 return matched;
1980 }
1981
1982 SwapDir::Pointer
1983 StoreEntry::store() const
1984 {
1985 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
1986 return INDEXSD(swap_dirn);
1987 }
1988
1989 void
1990 StoreEntry::unlink()
1991 {
1992 store()->unlink(*this); // implies disconnect()
1993 swap_filen = -1;
1994 swap_dirn = -1;
1995 swap_status = SWAPOUT_NONE;
1996 }
1997
1998 /*
1999 * return true if the entry is in a state where
2000 * it can accept more data (ie with write() method)
2001 */
2002 bool
2003 StoreEntry::isAccepting() const
2004 {
2005 if (STORE_PENDING != store_status)
2006 return false;
2007
2008 if (EBIT_TEST(flags, ENTRY_ABORTED))
2009 return false;
2010
2011 return true;
2012 }
2013
2014 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2015 {
2016 os << "e:";
2017
2018 if (e.mem_obj) {
2019 if (e.mem_obj->xitTable.index > -1)
2020 os << 't' << e.mem_obj->xitTable.index;
2021 if (e.mem_obj->memCache.index > -1)
2022 os << 'm' << e.mem_obj->memCache.index;
2023 }
2024 if (e.swap_filen > -1 || e.swap_dirn > -1)
2025 os << 'd' << e.swap_filen << '@' << e.swap_dirn;
2026
2027 os << '=';
2028
2029 // print only non-default status values, using unique letters
2030 if (e.mem_status != NOT_IN_MEMORY ||
2031 e.store_status != STORE_PENDING ||
2032 e.swap_status != SWAPOUT_NONE ||
2033 e.ping_status != PING_NONE) {
2034 if (e.mem_status != NOT_IN_MEMORY) os << 'm';
2035 if (e.store_status != STORE_PENDING) os << 's';
2036 if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
2037 if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
2038 }
2039
2040 // print only set flags, using unique letters
2041 if (e.flags) {
2042 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
2043 if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
2044 if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
2045 if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
2046 if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
2047 if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
2048 if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
2049 if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
2050 if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
2051 if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
2052 if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
2053 if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
2054 }
2055
2056 if (e.mem_obj && e.mem_obj->smpCollapsed)
2057 os << 'O';
2058
2059 return os << '/' << &e << '*' << e.locks();
2060 }
2061
2062 /* NullStoreEntry */
2063
2064 NullStoreEntry NullStoreEntry::_instance;
2065
2066 NullStoreEntry *
2067 NullStoreEntry::getInstance()
2068 {
2069 return &_instance;
2070 }
2071
2072 char const *
2073 NullStoreEntry::getMD5Text() const
2074 {
2075 return "N/A";
2076 }
2077
2078 void
2079 NullStoreEntry::operator delete(void*)
2080 {
2081 fatal ("Attempt to delete NullStoreEntry\n");
2082 }
2083
2084 char const *
2085 NullStoreEntry::getSerialisedMetaData()
2086 {
2087 return NULL;
2088 }