1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "CacheDigest.h"
38 #include "CacheManager.h"
39 #include "comm/Connection.h"
40 #include "ETag.h"
41 #include "event.h"
42 #include "fde.h"
43 #include "http.h"
44 #include "HttpReply.h"
45 #include "HttpRequest.h"
46 #include "mem_node.h"
47 #include "MemObject.h"
48 #include "mgr/Registration.h"
49 #include "mgr/StoreIoAction.h"
50 #include "profiler/Profiler.h"
51 #include "protos.h"
52 #include "repl_modules.h"
53 #include "SquidTime.h"
54 #include "Stack.h"
55 #include "StatCounters.h"
56 #include "stmem.h"
57 #include "store_digest.h"
58 #include "store_key_md5.h"
60 #include "store_log.h"
61 #include "store_rebuild.h"
62 #include "Store.h"
63 #include "StoreClient.h"
64 #include "StoreIOState.h"
65 #include "StoreMeta.h"
66 #include "StrList.h"
67 #include "swap_log_op.h"
68 #include "SwapDir.h"
69 #if USE_DELAY_POOLS
70 #include "DelayPools.h"
71 #endif
72 #if HAVE_LIMITS_H
73 #include <limits.h>
74 #endif
75
76 static STMCB storeWriteComplete;
77
78 #define REBUILD_TIMESTAMP_DELTA_MAX 2
79
80 #define STORE_IN_MEM_BUCKETS (229)
81
82 /** \todo Convert these string constants to generated enum string-arrays */
83
84 const char *memStatusStr[] = {
85 "NOT_IN_MEMORY",
86 "IN_MEMORY"
87 };
88
89 const char *pingStatusStr[] = {
90 "PING_NONE",
91 "PING_WAITING",
92 "PING_DONE"
93 };
94
95 const char *storeStatusStr[] = {
96 "STORE_OK",
97 "STORE_PENDING"
98 };
99
100 const char *swapStatusStr[] = {
101 "SWAPOUT_NONE",
102 "SWAPOUT_WRITING",
103 "SWAPOUT_DONE"
104 };
105
106 /*
107 * This defines a replacement policy (repl) type
108 */
109
110 typedef struct _storerepl_entry storerepl_entry_t;
111
112 struct _storerepl_entry {
113 const char *typestr;
114 REMOVALPOLICYCREATE *create;
115 };
116
117 static storerepl_entry_t *storerepl_list = NULL;
118
119 /*
120 * local function prototypes
121 */
122 static int getKeyCounter(void);
123 static OBJH storeCheckCachableStats;
124 static EVH storeLateRelease;
125
126 /*
127 * local variables
128 */
129 static Stack<StoreEntry*> LateReleaseStack;
130 MemAllocator *StoreEntry::pool = NULL;
131
132 StorePointer Store::CurrentRoot = NULL;
133
134 void
135 Store::Root(Store * aRoot)
136 {
137 CurrentRoot = aRoot;
138 }
139
140 void
141 Store::Root(StorePointer aRoot)
142 {
143 Root(aRoot.getRaw());
144 }
145
146 void
147 Store::Stats(StoreEntry * output)
148 {
149 assert (output);
150 Root().stat(*output);
151 }
152
153 void
154 Store::create()
155 {}
156
157 void
158 Store::diskFull()
159 {}
160
161 void
162 Store::sync()
163 {}
164
165 void
166 Store::unlink (StoreEntry &anEntry)
167 {
168 fatal("Store::unlink on invalid Store\n");
169 }
170
171 void *
172 StoreEntry::operator new (size_t bytecount)
173 {
174 assert (bytecount == sizeof (StoreEntry));
175
176 if (!pool) {
177 pool = memPoolCreate ("StoreEntry", bytecount);
178 pool->setChunkSize(2048 * 1024);
179 }
180
181 return pool->alloc();
182 }
183
184 void
185 StoreEntry::operator delete (void *address)
186 {
187 pool->freeOne(address);
188 }
189
190 void
191 StoreEntry::makePublic()
192 {
193 /* This object can be cached for a long time */
194
195 if (EBIT_TEST(flags, ENTRY_CACHABLE))
196 setPublicKey();
197 }
198
199 void
200 StoreEntry::makePrivate()
201 {
202 /* This object should never be cached at all */
203 expireNow();
204 releaseRequest(); /* delete object when not used */
205 /* releaseRequest clears ENTRY_CACHABLE flag */
206 }
207
208 void
209 StoreEntry::cacheNegatively()
210 {
211 /* This object may be negatively cached */
212 negativeCache();
213
214 if (EBIT_TEST(flags, ENTRY_CACHABLE))
215 setPublicKey();
216 }
217
218 size_t
219 StoreEntry::inUseCount()
220 {
221 if (!pool)
222 return 0;
223 return pool->getInUseCount();
224 }
225
226 const char *
227 StoreEntry::getMD5Text() const
228 {
229 return storeKeyText((const cache_key *)key);
230 }
231
232 #include "comm.h"
233
234 void
235 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
236 {
237 StoreEntry *anEntry = (StoreEntry *)theContext;
238 anEntry->delayAwareRead(aRead.conn,
239 aRead.buf,
240 aRead.len,
241 aRead.callback);
242 }
243
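/*
 * delayAwareRead() in outline: ask bytesWanted() how much we may read right
 * now; if the answer is zero, queue the read as a DeferredRead -- either on
 * the MemObject (read-ahead limit reached) or, with delay pools enabled, on
 * the most-limiting DelayId -- and return. If the connection is already
 * closing, drop the read (the callback will never fire). Otherwise issue
 * comm_read() for the allowed amount.
 */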
244 void
245 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
246 {
247 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
248 /* sketch: readdeferer* = getdeferer.
249 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
250 */
251
252 if (amountToRead == 0) {
253 assert (mem_obj);
254 /* read ahead limit */
255 /* Perhaps these two calls should both live in MemObject */
256 #if USE_DELAY_POOLS
257 if (!mem_obj->readAheadPolicyCanRead()) {
258 #endif
259 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
260 return;
261 #if USE_DELAY_POOLS
262 }
263
264 /* delay id limit */
265 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
266 return;
267
268 #endif
269
270 }
271
272 if (fd_table[conn->fd].closing()) {
273 // Readers must have closing callbacks if they want to be notified. No
274 // readers appeared to care around 2009/12/14 as they skipped reading
275 // for other reasons. Closing may already be true at the delayAwareRead
276 // call time or may happen while we wait after delayRead() above.
277 debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
278 callback);
279 return; // the read callback will never be called
280 }
281
282 comm_read(conn, buf, amountToRead, callback);
283 }
284
285 size_t
286 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
287 {
288 if (mem_obj == NULL)
289 return aRange.end;
290
291 #if URL_CHECKSUM_DEBUG
292
293 mem_obj->checkUrlChecksum();
294
295 #endif
296
297 if (!mem_obj->readAheadPolicyCanRead())
298 return 0;
299
300 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
301 }
302
303 bool
304 StoreEntry::checkDeferRead(int fd) const
305 {
306 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
307 }
308
309 void
310 StoreEntry::setNoDelay (bool const newValue)
311 {
312 if (mem_obj)
313 mem_obj->setNoDelay(newValue);
314 }
315
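/*
 * Roughly: decide whether a new client of this entry should be served from
 * memory (STORE_MEM_CLIENT) or from the swap file (STORE_DISK_CLIENT).
 * Memory clients are preferred while the object is still arriving or fully
 * resident in memory; disk clients are used once data has been trimmed from
 * memory, or so that later clients cannot delay the first one.
 */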
316 store_client_t
317 StoreEntry::storeClientType() const
318 {
319 /* The needed offset isn't in memory
320 * XXX TODO: this is wrong for range requests
321 * as the needed offset may *not* be 0, AND
322 * offset 0 in the memory object is the HTTP headers.
323 */
324
325 if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
326 // clients of an object cached in shared memory are memory clients
327 return STORE_MEM_CLIENT;
328 }
329
330 assert(mem_obj);
331
332 if (mem_obj->inmem_lo)
333 return STORE_DISK_CLIENT;
334
335 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
336 /* I don't think we should be adding clients to aborted entries */
337 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
338 return STORE_MEM_CLIENT;
339 }
340
341 if (store_status == STORE_OK) {
342 /* the object has completed. */
343
344 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
345 if (swap_status == SWAPOUT_DONE) {
346 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
347 if (mem_obj->endOffset() == mem_obj->object_sz) {
348 /* hot object fully swapped in */
349 return STORE_MEM_CLIENT;
350 }
351 } else {
352 /* Memory-only, or currently being swapped out */
353 return STORE_MEM_CLIENT;
354 }
355 }
356 return STORE_DISK_CLIENT;
357 }
358
359 /* here and past, entry is STORE_PENDING */
360 /*
361 * If this is the first client, let it be the mem client
362 */
363 if (mem_obj->nclients == 1)
364 return STORE_MEM_CLIENT;
365
366 /*
367 * If there is no disk file to open yet, we must make this a
368 * mem client. If we can't open the swapin file before writing
369 * to the client, there is no guarantee that we will be able
370 * to open it later when we really need it.
371 */
372 if (swap_status == SWAPOUT_NONE)
373 return STORE_MEM_CLIENT;
374
375 /*
376 * otherwise, make subsequent clients read from disk so they
377 * can not delay the first, and vice-versa.
378 */
379 return STORE_DISK_CLIENT;
380 }
381
382 StoreEntry::StoreEntry():
383 hidden_mem_obj(NULL),
384 swap_file_sz(0)
385 {
386 debugs(20, 3, HERE << "new StoreEntry " << this);
387 mem_obj = NULL;
388
389 expires = lastmod = lastref = timestamp = -1;
390
391 swap_status = SWAPOUT_NONE;
392 swap_filen = -1;
393 swap_dirn = -1;
394 }
395
396 StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
397 hidden_mem_obj(NULL),
398 swap_file_sz(0)
399 {
400 debugs(20, 3, HERE << "new StoreEntry " << this);
401 mem_obj = new MemObject(aUrl, aLogUrl);
402
403 expires = lastmod = lastref = timestamp = -1;
404
405 swap_status = SWAPOUT_NONE;
406 swap_filen = -1;
407 swap_dirn = -1;
408 }
409
410 StoreEntry::~StoreEntry()
411 {
412 if (swap_filen >= 0) {
413 SwapDir &sd = dynamic_cast<SwapDir&>(*store());
414 sd.disconnect(*this);
415 }
416 delete hidden_mem_obj;
417 }
418
419 #if USE_ADAPTATION
420 void
421 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
422 {
423 if (!deferredProducer)
424 deferredProducer = producer;
425 else
426 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
427 *deferredProducer << ", requested call: " << *producer);
428 }
429
430 void
431 StoreEntry::kickProducer()
432 {
433 if (deferredProducer != NULL) {
434 ScheduleCallHere(deferredProducer);
435 deferredProducer = NULL;
436 }
437 }
438 #endif
439
440 void
441 StoreEntry::destroyMemObject()
442 {
443 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
444 setMemStatus(NOT_IN_MEMORY);
445 MemObject *mem = mem_obj;
446 mem_obj = NULL;
447 delete mem;
448 delete hidden_mem_obj;
449 hidden_mem_obj = NULL;
450 }
451
452 void
453 StoreEntry::hideMemObject()
454 {
455 debugs(20, 3, HERE << "hiding " << mem_obj);
456 assert(mem_obj);
457 assert(!hidden_mem_obj);
458 hidden_mem_obj = mem_obj;
459 mem_obj = NULL;
460 }
461
462 void
463 destroyStoreEntry(void *data)
464 {
465 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
466 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
467 assert(e != NULL);
468
469 if (e == NullStoreEntry::getInstance())
470 return;
471
472 e->destroyMemObject();
473
474 e->hashDelete();
475
476 assert(e->key == NULL);
477
478 delete e;
479 }
480
481 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
482
483 void
484 StoreEntry::hashInsert(const cache_key * someKey)
485 {
486 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
487 key = storeKeyDup(someKey);
488 hash_join(store_table, this);
489 }
490
491 void
492 StoreEntry::hashDelete()
493 {
494 hash_remove_link(store_table, this);
495 storeKeyFree((const cache_key *)key);
496 key = NULL;
497 }
498
499 /* -------------------------------------------------------------------------- */
500
501 /* get rid of memory copy of the object */
502 void
503 StoreEntry::purgeMem()
504 {
505 if (mem_obj == NULL)
506 return;
507
508 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
509
510 destroyMemObject();
511
512 if (swap_status != SWAPOUT_DONE)
513 release();
514 }
515
516 /* RBC 20050104 this is wrong: memory ref counting
517 * is not at all equivalent to the store 'usage' concept
518 * which the replacement policies should be acting upon.
519 * specifically, object iteration within stores needs
520 * memory ref counting to prevent race conditions,
521 * but this should not influence store replacement.
522 */
523 void
524 StoreEntry::lock()
526 {
527 ++lock_count;
528 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
529 lock_count );
530 lastref = squid_curtime;
531 Store::Root().reference(*this);
532 }
533
534 void
535 StoreEntry::setReleaseFlag()
536 {
537 if (EBIT_TEST(flags, RELEASE_REQUEST))
538 return;
539
540 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
541
542 EBIT_SET(flags, RELEASE_REQUEST);
543 }
544
545 void
546 StoreEntry::releaseRequest()
547 {
548 if (EBIT_TEST(flags, RELEASE_REQUEST))
549 return;
550
551 setReleaseFlag();
552
553 /*
554 * Clear cachable flag here because we might get called before
555 * anyone else even looks at the cachability flag. Also, this
556 * prevents httpMakePublic from really setting a public key.
557 */
558 EBIT_CLR(flags, ENTRY_CACHABLE);
559
560 setPrivateKey();
561 }
562
563 /* unlock the object; returns 0 if the object was released (or handed to the
564 * idle-entry handler) after the unlock, otherwise the remaining lock_count */
565 int
566 StoreEntry::unlock()
567 {
568 --lock_count;
569 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
570
571 if (lock_count)
572 return (int) lock_count;
573
574 if (store_status == STORE_PENDING)
575 setReleaseFlag();
576
577 assert(storePendingNClients(this) == 0);
578
579 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
580 this->release();
581 return 0;
582 }
583
584 if (EBIT_TEST(flags, KEY_PRIVATE))
585 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
586
587 Store::Root().handleIdleEntry(*this); // may delete us
588 return 0;
589 }
590
591 void
592 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
593 {
594 assert (aClient);
595 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
596
597 if (!result)
598 aClient->created (NullStoreEntry::getInstance());
599 else
600 aClient->created (result);
601 }
602
603 void
604 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
605 {
606 assert (aClient);
607 StoreEntry *result = storeGetPublicByRequest (request);
608
609 if (!result)
610 result = NullStoreEntry::getInstance();
611
612 aClient->created (result);
613 }
614
615 void
616 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
617 {
618 assert (aClient);
619 StoreEntry *result = storeGetPublic (uri, method);
620
621 if (!result)
622 result = NullStoreEntry::getInstance();
623
624 aClient->created (result);
625 }
626
627 StoreEntry *
628 storeGetPublic(const char *uri, const HttpRequestMethod& method)
629 {
630 return Store::Root().get(storeKeyPublic(uri, method));
631 }
632
633 StoreEntry *
634 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
635 {
636 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
637 }
638
639 StoreEntry *
640 storeGetPublicByRequest(HttpRequest * req)
641 {
642 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
643
644 if (e == NULL && req->method == METHOD_HEAD)
645 /* We can generate a HEAD reply from a cached GET object */
646 e = storeGetPublicByRequestMethod(req, METHOD_GET);
647
648 return e;
649 }
650
651 static int
652 getKeyCounter(void)
653 {
654 static int key_counter = 0;
655
656 if (++key_counter < 0)
657 key_counter = 1;
658
659 return key_counter;
660 }
661
662 /* RBC 20050104 AFAICT this should become simpler:
663 * rather than reinserting with a special key it should be marked
664 * as 'released' and then cleaned up when refcounting indicates.
665 * the StoreHashIndex could well implement its 'released' in the
666 * current manner.
667 * Also, clean log writing should skip over it.
668 * Otherwise, we need a 'remove from the index but not the store'
669 * concept.
670 */
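/*
 * Re-key this entry under a unique, per-process private key so that public
 * lookups can no longer find it; if the entry already has an on-disk copy,
 * a SWAP_LOG_DEL record is written first.
 */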
671 void
672 StoreEntry::setPrivateKey()
673 {
674 const cache_key *newkey;
675
676 if (key && EBIT_TEST(flags, KEY_PRIVATE))
677 return; /* is already private */
678
679 if (key) {
680 if (swap_filen > -1)
681 storeDirSwapLog(this, SWAP_LOG_DEL);
682
683 hashDelete();
684 }
685
686 if (mem_obj != NULL) {
687 mem_obj->id = getKeyCounter();
688 newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
689 } else {
690 newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
691 }
692
693 assert(hash_lookup(store_table, newkey) == NULL);
694 EBIT_SET(flags, KEY_PRIVATE);
695 hashInsert(newkey);
696 }
697
698 void
699 StoreEntry::setPublicKey()
700 {
701 StoreEntry *e2 = NULL;
702 const cache_key *newkey;
703
704 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
705 return; /* is already public */
706
707 assert(mem_obj);
708
709 /*
710 * We can't make RELEASE_REQUEST objects public. Depending on
711 * when RELEASE_REQUEST gets set, we might not be swapping out
712 * the object. If we're not swapping out, then subsequent
713 * store clients won't be able to access object data which has
714 * been freed from memory.
715 *
716 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
717 * be set, and StoreEntry::setPublicKey() should not be called.
718 */
719 #if MORE_DEBUG_OUTPUT
720
721 if (EBIT_TEST(flags, RELEASE_REQUEST))
722 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
723
724 #endif
725
726 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
727
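/* Vary handling sketch: the variant being stored gets a key derived from the
 * request plus its vary_headers, while a small "x-squid-internal/vary" marker
 * object is kept under the plain URL key so future requests can learn which
 * headers select among the variants. */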
728 if (mem_obj->request) {
729 HttpRequest *request = mem_obj->request;
730
731 if (!mem_obj->vary_headers) {
732 /* First handle the case where the object no longer varies */
733 safe_free(request->vary_headers);
734 } else {
735 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
736 /* Oops.. the variance has changed. Kill the base object
737 * to record the new variance key
738 */
739 safe_free(request->vary_headers); /* free old "bad" variance key */
740 StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);
741
742 if (pe)
743 pe->release();
744 }
745
746 /* Make sure the request knows the variance status */
747 if (!request->vary_headers) {
748 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
749
750 if (vary)
751 request->vary_headers = xstrdup(vary);
752 }
753 }
754
755 // TODO: storeGetPublic() calls below may create unlocked entries.
756 // We should add/use storeHas() API or lock/unlock those entries.
757 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
758 /* Create "vary" base object */
759 String vary;
760 StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
761 /* We are allowed to do this typecast */
762 HttpReply *rep = new HttpReply;
763 rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
764 vary = mem_obj->getReply()->header.getList(HDR_VARY);
765
766 if (vary.size()) {
767 /* Again, we own this structure layout */
768 rep->header.putStr(HDR_VARY, vary.termedBuf());
769 vary.clean();
770 }
771
772 #if X_ACCELERATOR_VARY
773 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
774
775 if (vary.defined()) {
776 /* Again, we own this structure layout */
777 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
778 vary.clean();
779 }
780
781 #endif
782 pe->replaceHttpReply(rep);
783
784 pe->timestampsSet();
785
786 pe->makePublic();
787
788 pe->complete();
789
790 pe->unlock();
791 }
792
793 newkey = storeKeyPublicByRequest(mem_obj->request);
794 } else
795 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
796
797 if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
798 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
799 e2->setPrivateKey();
800 e2->release();
801
802 if (mem_obj->request)
803 newkey = storeKeyPublicByRequest(mem_obj->request);
804 else
805 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
806 }
807
808 if (key)
809 hashDelete();
810
811 EBIT_CLR(flags, KEY_PRIVATE);
812
813 hashInsert(newkey);
814
815 if (swap_filen > -1)
816 storeDirSwapLog(this, SWAP_LOG_ADD);
817 }
818
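/*
 * Allocate a fresh STORE_PENDING entry (with MemObject) for the given URL.
 * The entry starts locked once on behalf of the caller; it gets a private
 * key unless neighbors allow public keys and the request is hierarchical.
 * Non-cachable requests are immediately marked for release.
 */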
819 StoreEntry *
820 storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
821 {
822 StoreEntry *e = NULL;
823 MemObject *mem = NULL;
824 debugs(20, 3, "storeCreateEntry: '" << url << "'");
825
826 e = new StoreEntry(url, log_url);
827 e->lock_count = 1; /* Note lock here w/o calling storeLock() */
828 mem = e->mem_obj;
829 mem->method = method;
830
831 if (neighbors_do_private_keys || !flags.hierarchical)
832 e->setPrivateKey();
833 else
834 e->setPublicKey();
835
836 if (flags.cachable) {
837 EBIT_SET(e->flags, ENTRY_CACHABLE);
838 EBIT_CLR(e->flags, RELEASE_REQUEST);
839 } else {
840 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
841 e->releaseRequest();
842 }
843
844 e->store_status = STORE_PENDING;
845 e->setMemStatus(NOT_IN_MEMORY);
846 e->refcount = 0;
847 e->lastref = squid_curtime;
848 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
849 e->ping_status = PING_NONE;
850 EBIT_SET(e->flags, ENTRY_VALIDATED);
851 return e;
852 }
853
854 /* Mark object as expired */
855 void
856 StoreEntry::expireNow()
857 {
858 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
859 expires = squid_curtime;
860 }
861
862 void
863 storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
864 {
865 PROF_start(storeWriteComplete);
866 StoreEntry *e = (StoreEntry *)data;
867
868 if (EBIT_TEST(e->flags, DELAY_SENDING)) {
869 PROF_stop(storeWriteComplete);
870 return;
871 }
872
873 e->invokeHandlers();
874 PROF_stop(storeWriteComplete);
875 }
876
877 void
878 StoreEntry::write (StoreIOBuffer writeBuffer)
879 {
880 assert(mem_obj != NULL);
881 /* This assert will change when we teach the store to update */
882 PROF_start(StoreEntry_write);
883 assert(store_status == STORE_PENDING);
884
885 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
886 PROF_stop(StoreEntry_write);
887 storeGetMemSpace(writeBuffer.length);
888 mem_obj->write (writeBuffer, storeWriteComplete, this);
889 }
890
891 /* Append incoming data from a primary server to an entry. */
892 void
893 StoreEntry::append(char const *buf, int len)
894 {
895 assert(mem_obj != NULL);
896 assert(len >= 0);
897 assert(store_status == STORE_PENDING);
898
899 StoreIOBuffer tempBuffer;
900 tempBuffer.data = (char *)buf;
901 tempBuffer.length = len;
902 /*
903 * XXX sigh, offset might be < 0 here, but it gets "corrected"
904 * later. This offset crap is such a mess.
905 */
906 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
907 write(tempBuffer);
908 }
909
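/*
 * printf-style appenders for writing text into a StoreEntry, used heavily by
 * cache manager reports, e.g.
 *   storeAppendPrintf(sentry, "no.too_big\t%d\n", store_check_cachable_hist.no.too_big);
 * Each call is formatted into a 4096-byte local buffer, so longer output is
 * truncated.
 */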
910 void
911 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
912 {
913 va_list args;
914 va_start(args, fmt);
915
916 storeAppendVPrintf(e, fmt, args);
917 va_end(args);
918 }
919
920 /* used by storeAppendPrintf and Packer */
921 void
922 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
923 {
924 LOCAL_ARRAY(char, buf, 4096);
925 buf[0] = '\0';
926 vsnprintf(buf, 4096, fmt, vargs);
927 e->append(buf, strlen(buf));
928 }
929
930 struct _store_check_cachable_hist {
931
932 struct {
933 int non_get;
934 int not_entry_cachable;
935 int wrong_content_length;
936 int negative_cached;
937 int too_big;
938 int too_small;
939 int private_key;
940 int too_many_open_files;
941 int too_many_open_fds;
942 } no;
943
944 struct {
945 int Default;
946 } yes;
947 } store_check_cachable_hist;
948
949 int
950 storeTooManyDiskFilesOpen(void)
951 {
952 if (Config.max_open_disk_fds == 0)
953 return 0;
954
955 if (store_open_disk_fd > Config.max_open_disk_fds)
956 return 1;
957
958 return 0;
959 }
960
961 int
962 StoreEntry::checkTooSmall()
963 {
964 if (EBIT_TEST(flags, ENTRY_SPECIAL))
965 return 0;
966
967 if (STORE_OK == store_status)
968 if (mem_obj->object_sz < 0 ||
969 mem_obj->object_sz < Config.Store.minObjectSize)
970 return 1;
971 if (getReply()->content_length > -1)
972 if (getReply()->content_length < Config.Store.minObjectSize)
973 return 1;
974 return 0;
975 }
976
977 // TODO: remove checks already performed by swapoutPossible()
978 // TODO: move "too many open..." checks outside -- we are called too early/late
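/*
 * Returns 1 if the entry may be cached on disk. The "no" cases below are
 * counted in store_check_cachable_hist and, except for negative-cached
 * entries, end with releaseRequest() so the object will not be kept.
 */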
979 int
980 StoreEntry::checkCachable()
981 {
982 #if CACHE_ALL_METHODS
983
984 if (mem_obj->method != METHOD_GET) {
985 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
986 ++store_check_cachable_hist.no.non_get;
987 } else
988 #endif
989 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
990 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
991 ++store_check_cachable_hist.no.wrong_content_length;
992 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
993 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
994 ++store_check_cachable_hist.no.not_entry_cachable;
995 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
996 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
997 ++store_check_cachable_hist.no.negative_cached;
998 return 0; /* avoid release call below */
999 } else if ((getReply()->content_length > 0 &&
1000 getReply()->content_length
1001 > Config.Store.maxObjectSize) ||
1002 mem_obj->endOffset() > Config.Store.maxObjectSize) {
1003 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1004 ++store_check_cachable_hist.no.too_big;
1005 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
1006 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1007 ++store_check_cachable_hist.no.too_big;
1008 } else if (checkTooSmall()) {
1009 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1010 ++store_check_cachable_hist.no.too_small;
1011 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1012 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1013 ++store_check_cachable_hist.no.private_key;
1014 } else if (swap_status != SWAPOUT_NONE) {
1015 /*
1016 * here we checked the swap_status because the remaining
1017 * cases are relevant only if we haven't started swapping
1018 * out the object yet.
1019 */
1020 return 1;
1021 } else if (storeTooManyDiskFilesOpen()) {
1022 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1023 ++store_check_cachable_hist.no.too_many_open_files;
1024 } else if (fdNFree() < RESERVED_FD) {
1025 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1026 ++store_check_cachable_hist.no.too_many_open_fds;
1027 } else {
1028 ++store_check_cachable_hist.yes.Default;
1029 return 1;
1030 }
1031
1032 releaseRequest();
1033 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1034 return 0;
1035 }
1036
1037 void
1038 storeCheckCachableStats(StoreEntry *sentry)
1039 {
1040 storeAppendPrintf(sentry, "Category\t Count\n");
1041
1042 #if CACHE_ALL_METHODS
1043
1044 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1045 store_check_cachable_hist.no.non_get);
1046 #endif
1047
1048 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1049 store_check_cachable_hist.no.not_entry_cachable);
1050 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1051 store_check_cachable_hist.no.wrong_content_length);
1052 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1053 store_check_cachable_hist.no.negative_cached);
1054 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1055 store_check_cachable_hist.no.too_big);
1056 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1057 store_check_cachable_hist.no.too_small);
1058 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1059 store_check_cachable_hist.no.private_key);
1060 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1061 store_check_cachable_hist.no.too_many_open_files);
1062 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1063 store_check_cachable_hist.no.too_many_open_fds);
1064 storeAppendPrintf(sentry, "yes.default\t%d\n",
1065 store_check_cachable_hist.yes.Default);
1066 }
1067
1068 void
1069 StoreEntry::complete()
1070 {
1071 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1072
1073 if (store_status != STORE_PENDING) {
1074 /*
1075 * if we're not STORE_PENDING, then probably we got aborted
1076 * and there should be NO clients on this entry
1077 */
1078 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1079 assert(mem_obj->nclients == 0);
1080 return;
1081 }
1082
1083 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1084 * in use of object_sz?
1085 */
1086 mem_obj->object_sz = mem_obj->endOffset();
1087
1088 store_status = STORE_OK;
1089
1090 assert(mem_status == NOT_IN_MEMORY);
1091
1092 if (!validLength()) {
1093 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1094 releaseRequest();
1095 }
1096
1097 #if USE_CACHE_DIGESTS
1098 if (mem_obj->request)
1099 mem_obj->request->hier.store_complete_stop = current_time;
1100
1101 #endif
1102 /*
1103 * We used to call invokeHandlers, then storeSwapOut. However,
1104 * Madhukar Reddy <myreddy@persistence.com> reported that
1105 * responses without content length would sometimes get released
1106 * in client_side, thinking that the response is incomplete.
1107 */
1108 invokeHandlers();
1109 }
1110
1111 /*
1112 * Someone wants to abort this transfer. Set the reason in the
1113 * request structure, call the server-side callback and mark the
1114 * entry for releasing
1115 */
1116 void
1117 StoreEntry::abort()
1118 {
1119 ++statCounter.aborted_requests;
1120 assert(store_status == STORE_PENDING);
1121 assert(mem_obj != NULL);
1122 debugs(20, 6, "storeAbort: " << getMD5Text());
1123
1124 lock(); /* lock while aborting */
1125 negativeCache();
1126
1127 releaseRequest();
1128
1129 EBIT_SET(flags, ENTRY_ABORTED);
1130
1131 setMemStatus(NOT_IN_MEMORY);
1132
1133 store_status = STORE_OK;
1134
1135 /* Notify the server side */
1136
1137 /*
1138 * DPW 2007-05-07
1139 * Should we check abort.data for validity?
1140 */
1141 if (mem_obj->abort.callback) {
1142 if (!cbdataReferenceValid(mem_obj->abort.data))
1143 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
1144 eventAdd("mem_obj->abort.callback",
1145 mem_obj->abort.callback,
1146 mem_obj->abort.data,
1147 0.0,
1148 true);
1149 unregisterAbort();
1150 }
1151
1152 /* XXX Should we reverse these two, so that there is no
1153 * unneeded disk swapping triggered?
1154 */
1155 /* Notify the client side */
1156 invokeHandlers();
1157
1158 // abort swap out, invalidating what was created so far (release follows)
1159 swapOutFileClose(StoreIOState::writerGone);
1160
1161 unlock(); /* unlock */
1162 }
1163
1164 /**
1165 * Clear memory storage to accommodate the given object length.
1166 */
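/*
 * A sketch of the algorithm: run at most once per second; compute
 * pages_needed = ceil(size / SM_PAGE_SIZE); if the in-use mem_node count plus
 * pages_needed stays below store_pages_max, do nothing; otherwise walk the
 * memory replacement policy, purging in-memory copies until we drop back
 * under the limit.
 */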
1167 void
1168 storeGetMemSpace(int size)
1169 {
1170 PROF_start(storeGetMemSpace);
1171 StoreEntry *e = NULL;
1172 int released = 0;
1173 static time_t last_check = 0;
1174 size_t pages_needed;
1175 RemovalPurgeWalker *walker;
1176
1177 if (squid_curtime == last_check) {
1178 PROF_stop(storeGetMemSpace);
1179 return;
1180 }
1181
1182 last_check = squid_curtime;
1183
1184 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1185
1186 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1187 PROF_stop(storeGetMemSpace);
1188 return;
1189 }
1190
1191 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1192 " pages");
1193
1194 /* XXX what to set as max_scan here? */
1195 walker = mem_policy->PurgeInit(mem_policy, 100000);
1196
1197 while ((e = walker->Next(walker))) {
1198 e->purgeMem();
1199 ++released;
1200
1201 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1202 break;
1203 }
1204
1205 walker->Done(walker);
1206 debugs(20, 3, "storeGetMemSpace stats:");
1207 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1208 debugs(20, 3, " " << std::setw(6) << released << " were released");
1209 PROF_stop(storeGetMemSpace);
1210 }
1211
1212 /* thunk through to Store::Root().maintain(). Note that this would be better still
1213 * if registered against the root store itself, but that requires more complex
1214 * update logic - bigger fish to fry first. Long term each store when
1215 * it becomes active will self register
1216 */
1217 void
1218 Store::Maintain(void *notused)
1219 {
1220 Store::Root().maintain();
1221
1222 /* Reregister a maintain event .. */
1223 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1224
1225 }
1226
1227 /* The maximum objects to scan for maintain storage space */
1228 #define MAINTAIN_MAX_SCAN 1024
1229 #define MAINTAIN_MAX_REMOVE 64
1230
1231 /*
1232 * This routine is to be called by the main loop in main.c.
1233 * It removes expired objects from only one bucket each time it is called.
1234 *
1235 * This should get called 1/s from main().
1236 */
1237 void
1238 StoreController::maintain()
1239 {
1240 static time_t last_warn_time = 0;
1241
1242 PROF_start(storeMaintainSwapSpace);
1243 swapDir->maintain();
1244
1245 /* this should be emitted by the oversize dir, not globally */
1246
1247 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1248 if (squid_curtime - last_warn_time > 10) {
1249 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1250 << Store::Root().currentSize() / 1024.0 << " KB > "
1251 << (Store::Root().maxSize() >> 10) << " KB");
1252 last_warn_time = squid_curtime;
1253 }
1254 }
1255
1256 PROF_stop(storeMaintainSwapSpace);
1257 }
1258
1259 /* release an object from a cache */
1260 void
1261 StoreEntry::release()
1262 {
1263 PROF_start(storeRelease);
1264 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1265 /* If, for any reason we can't discard this object because of an
1266 * outstanding request, mark it for pending release */
1267
1268 if (locked()) {
1269 expireNow();
1270 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1271 releaseRequest();
1272 PROF_stop(storeRelease);
1273 return;
1274 }
1275
1276 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1277 setPrivateKey();
1278
1279 if (mem_obj)
1280 destroyMemObject();
1281
1282 if (swap_filen > -1) {
1283 /*
1284 * Fake a call to StoreEntry->lock() When rebuilding is done,
1285 * we'll just call StoreEntry->unlock() on these.
1286 */
1287 ++lock_count;
1288 setReleaseFlag();
1289 LateReleaseStack.push_back(this);
1290 } else {
1291 destroyStoreEntry(static_cast<hash_link *>(this));
1292 // "this" is no longer valid
1293 }
1294
1295 PROF_stop(storeRelease);
1296 return;
1297 }
1298
1299 storeLog(STORE_LOG_RELEASE, this);
1300
1301 if (swap_filen > -1) {
1302 // log before unlink() below clears swap_filen
1303 if (!EBIT_TEST(flags, KEY_PRIVATE))
1304 storeDirSwapLog(this, SWAP_LOG_DEL);
1305
1306 unlink();
1307 }
1308
1309 setMemStatus(NOT_IN_MEMORY);
1310 destroyStoreEntry(static_cast<hash_link *>(this));
1311 PROF_stop(storeRelease);
1312 }
1313
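/*
 * Event handler for entries queued on LateReleaseStack during the index
 * rebuild: once rebuilding has finished, unlock them in batches of 10 per
 * event tick until the stack is empty.
 */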
1314 static void
1315 storeLateRelease(void *unused)
1316 {
1317 StoreEntry *e;
1318 int i;
1319 static int n = 0;
1320
1321 if (StoreController::store_dirs_rebuilding) {
1322 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1323 return;
1324 }
1325
1326 for (i = 0; i < 10; ++i) {
1327 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1328
1329 if (e == NULL) {
1330 /* done! */
1331 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1332 return;
1333 }
1334
1335 e->unlock();
1336 ++n;
1337 }
1338
1339 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1340 }
1341
1342 /* return 1 if a store entry is locked */
1343 int
1344 StoreEntry::locked() const
1345 {
1346 if (lock_count)
1347 return 1;
1348
1349 if (swap_status == SWAPOUT_WRITING)
1350 return 1;
1351
1352 if (store_status == STORE_PENDING)
1353 return 1;
1354
1355 /*
1356 * SPECIAL, PUBLIC entries should be "locked"
1357 */
1358 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1359 if (!EBIT_TEST(flags, KEY_PRIVATE))
1360 return 1;
1361
1362 return 0;
1363 }
1364
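/*
 * Compares the stored object size against hdr_sz + Content-Length from the
 * reply. Replies with unknown length, zero header size, replies to HEAD
 * requests, and 304/204 status codes are always considered valid.
 */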
1365 bool
1366 StoreEntry::validLength() const
1367 {
1368 int64_t diff;
1369 const HttpReply *reply;
1370 assert(mem_obj != NULL);
1371 reply = getReply();
1372 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1373 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1374 objectLen());
1375 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1376 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1377
1378 if (reply->content_length < 0) {
1379 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1380 return 1;
1381 }
1382
1383 if (reply->hdr_sz == 0) {
1384 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1385 return 1;
1386 }
1387
1388 if (mem_obj->method == METHOD_HEAD) {
1389 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1390 return 1;
1391 }
1392
1393 if (reply->sline.status == HTTP_NOT_MODIFIED)
1394 return 1;
1395
1396 if (reply->sline.status == HTTP_NO_CONTENT)
1397 return 1;
1398
1399 diff = reply->hdr_sz + reply->content_length - objectLen();
1400
1401 if (diff == 0)
1402 return 1;
1403
1404 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1405
1406 return 0;
1407 }
1408
1409 static void
1410 storeRegisterWithCacheManager(void)
1411 {
1412 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1413 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1414 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1415 storeCheckCachableStats, 0, 1);
1416 }
1417
1418 void
1419 storeInit(void)
1420 {
1421 storeKeyInit();
1422 mem_policy = createRemovalPolicy(Config.memPolicy);
1423 storeDigestInit();
1424 storeLogOpen();
1425 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1426 Store::Root().init();
1427 storeRebuildStart();
1428
1429 storeRegisterWithCacheManager();
1430 }
1431
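/*
 * Derives the global swap watermarks and memory page limit from squid.conf:
 * store_swap_high/low are the configured percentages of
 * Store::Root().maxSize(), and store_pages_max is cache_mem divided by the
 * size of one mem_node.
 */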
1432 void
1433 storeConfigure(void)
1434 {
1435 store_swap_high = (long) (((float) Store::Root().maxSize() *
1436 (float) Config.Swap.highWaterMark) / (float) 100);
1437 store_swap_low = (long) (((float) Store::Root().maxSize() *
1438 (float) Config.Swap.lowWaterMark) / (float) 100);
1439 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1440 }
1441
1442 bool
1443 StoreEntry::memoryCachable() const
1444 {
1445 if (mem_obj == NULL)
1446 return 0;
1447
1448 if (mem_obj->data_hdr.size() == 0)
1449 return 0;
1450
1451 if (mem_obj->inmem_lo != 0)
1452 return 0;
1453
1454 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1455 return 0;
1456
1457 return 1;
1458 }
1459
1460 int
1461 StoreEntry::checkNegativeHit() const
1462 {
1463 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1464 return 0;
1465
1466 if (expires <= squid_curtime)
1467 return 0;
1468
1469 if (store_status != STORE_OK)
1470 return 0;
1471
1472 return 1;
1473 }
1474
1475 /**
1476 * Set object for negative caching.
1477 * Preserves any expiry information given by the server.
1478 * In the absence of proper expiry info it will be set to expire immediately,
1479 * or, with HTTP violations enabled, after the configured negative TTL.
1480 */
1481 void
1482 StoreEntry::negativeCache()
1483 {
1484 // XXX: should make the default for expires 0 instead of -1
1485 // so we can distinguish "Expires: -1" from nothing.
1486 if (expires <= 0)
1487 #if USE_HTTP_VIOLATIONS
1488 expires = squid_curtime + Config.negativeTtl;
1489 #else
1490 expires = squid_curtime;
1491 #endif
1492 EBIT_SET(flags, ENTRY_NEGCACHED);
1493 }
1494
1495 void
1496 storeFreeMemory(void)
1497 {
1498 Store::Root(NULL);
1499 #if USE_CACHE_DIGESTS
1500
1501 if (store_digest)
1502 cacheDigestDestroy(store_digest);
1503
1504 #endif
1505
1506 store_digest = NULL;
1507 }
1508
1509 int
1510 expiresMoreThan(time_t expires, time_t when)
1511 {
1512 if (expires < 0) /* No Expires given */
1513 return 1;
1514
1515 return (expires > (squid_curtime + when));
1516 }
1517
1518 int
1519 StoreEntry::validToSend() const
1520 {
1521 if (EBIT_TEST(flags, RELEASE_REQUEST))
1522 return 0;
1523
1524 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1525 if (expires <= squid_curtime)
1526 return 0;
1527
1528 if (EBIT_TEST(flags, ENTRY_ABORTED))
1529 return 0;
1530
1531 return 1;
1532 }
1533
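/*
 * Worked example of the clock correction below: if the reply carries a
 * "Date:" equal to our current time and "Age: 10", then age (10) exceeds
 * squid_curtime - served_date (0), so served_date is pulled back to
 * squid_curtime - 10 before expires and timestamp are derived from it.
 */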
1534 void
1535 StoreEntry::timestampsSet()
1536 {
1537 const HttpReply *reply = getReply();
1538 time_t served_date = reply->date;
1539 int age = reply->header.getInt(HDR_AGE);
1540 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1541 /* make sure that 0 <= served_date <= squid_curtime */
1542
1543 if (served_date < 0 || served_date > squid_curtime)
1544 served_date = squid_curtime;
1545
1546 /* Bug 1791:
1547 * If the returned Date: is more than 24 hours older than
1548 * the squid_curtime, then one of us needs to use NTP to set our
1549 * clock. We'll pretend that our clock is right.
1550 */
1551 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1552 served_date = squid_curtime;
1553
1554 /*
1555 * Compensate with Age header if origin server clock is ahead
1556 * of us and there is a cache in between us and the origin
1557 * server. But DONT compensate if the age value is larger than
1558 * squid_curtime because it results in a negative served_date.
1559 */
1560 if (age > squid_curtime - served_date)
1561 if (squid_curtime > age)
1562 served_date = squid_curtime - age;
1563
1564 // compensate for Squid-to-server and server-to-Squid delays
1565 if (mem_obj && mem_obj->request) {
1566 const time_t request_sent =
1567 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1568 if (0 < request_sent && request_sent < squid_curtime)
1569 served_date -= (squid_curtime - request_sent);
1570 }
1571
1572 if (reply->expires > 0 && reply->date > -1)
1573 expires = served_date + (reply->expires - reply->date);
1574 else
1575 expires = reply->expires;
1576
1577 lastmod = reply->last_modified;
1578
1579 timestamp = served_date;
1580 }
1581
1582 void
1583 StoreEntry::registerAbort(STABH * cb, void *data)
1584 {
1585 assert(mem_obj);
1586 assert(mem_obj->abort.callback == NULL);
1587 mem_obj->abort.callback = cb;
1588 mem_obj->abort.data = cbdataReference(data);
1589 }
1590
1591 void
1592 StoreEntry::unregisterAbort()
1593 {
1594 assert(mem_obj);
1595 if (mem_obj->abort.callback) {
1596 mem_obj->abort.callback = NULL;
1597 cbdataReferenceDone(mem_obj->abort.data);
1598 }
1599 }
1600
1601 void
1602 StoreEntry::dump(int l) const
1603 {
1604 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1605 debugs(20, l, "StoreEntry->next: " << next);
1606 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1607 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1608 debugs(20, l, "StoreEntry->lastref: " << lastref);
1609 debugs(20, l, "StoreEntry->expires: " << expires);
1610 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1611 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1612 debugs(20, l, "StoreEntry->refcount: " << refcount);
1613 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1614 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1615 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1616 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1617 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1618 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1619 debugs(20, l, "StoreEntry->store_status: " << store_status);
1620 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1621 }
1622
1623 /*
1624 * NOTE, this function assumes only two mem states
1625 */
1626 void
1627 StoreEntry::setMemStatus(mem_status_t new_status)
1628 {
1629 if (new_status == mem_status)
1630 return;
1631
1632 // are we using a shared memory cache?
1633 if (Config.memShared && IamWorkerProcess()) {
1634 // enumerate calling cases if shared memory is enabled
1635 assert(new_status != IN_MEMORY || EBIT_TEST(flags, ENTRY_SPECIAL));
1636 // This method was designed to update replacement policy, not to
1637 // actually purge something from the memory cache (TODO: rename?).
1638 // Shared memory cache does not have a policy that needs updates.
1639 mem_status = new_status;
1640 return;
1641 }
1642
1643 assert(mem_obj != NULL);
1644
1645 if (new_status == IN_MEMORY) {
1646 assert(mem_obj->inmem_lo == 0);
1647
1648 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1649 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1650 } else {
1651 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1652 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1653 }
1654
1655 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1656 } else {
1657 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1658 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1659 } else {
1660 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1661 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1662 }
1663
1664 --hot_obj_count;
1665 }
1666
1667 mem_status = new_status;
1668 }
1669
1670 const char *
1671 StoreEntry::url() const
1672 {
1673 if (this == NULL)
1674 return "[null_entry]";
1675 else if (mem_obj == NULL)
1676 return "[null_mem_obj]";
1677 else
1678 return mem_obj->url;
1679 }
1680
1681 void
1682 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1683 {
1684 if (mem_obj)
1685 return;
1686
1687 if (hidden_mem_obj) {
1688 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1689 mem_obj = hidden_mem_obj;
1690 hidden_mem_obj = NULL;
1691 mem_obj->resetUrls(aUrl, aLogUrl);
1692 return;
1693 }
1694
1695 mem_obj = new MemObject(aUrl, aLogUrl);
1696 }
1697
1698 /* this just sets DELAY_SENDING */
1699 void
1700 StoreEntry::buffer()
1701 {
1702 EBIT_SET(flags, DELAY_SENDING);
1703 }
1704
1705 /* this just clears DELAY_SENDING and Invokes the handlers */
1706 void
1707 StoreEntry::flush()
1708 {
1709 if (EBIT_TEST(flags, DELAY_SENDING)) {
1710 EBIT_CLR(flags, DELAY_SENDING);
1711 invokeHandlers();
1712 }
1713 }
1714
1715 int64_t
1716 StoreEntry::objectLen() const
1717 {
1718 assert(mem_obj != NULL);
1719 return mem_obj->object_sz;
1720 }
1721
1722 int64_t
1723 StoreEntry::contentLen() const
1724 {
1725 assert(mem_obj != NULL);
1726 assert(getReply() != NULL);
1727 return objectLen() - getReply()->hdr_sz;
1728 }
1729
1730 HttpReply const *
1731 StoreEntry::getReply () const
1732 {
1733 if (NULL == mem_obj)
1734 return NULL;
1735
1736 return mem_obj->getReply();
1737 }
1738
1739 void
1740 StoreEntry::reset()
1741 {
1742 assert (mem_obj);
1743 debugs(20, 3, "StoreEntry::reset: " << url());
1744 mem_obj->reset();
1745 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1746 rep->reset();
1747 expires = lastmod = timestamp = -1;
1748 }
1749
1750 /*
1751 * storeFsInit
1752 *
1753 * This routine calls the SETUP routine for each fs type.
1754 * I don't know where the best place for this is, and I'm not going to shuffle
1755 * around large chunks of code right now (that can be done once it's working).
1756 */
1757 void
1758 storeFsInit(void)
1759 {
1760 storeReplSetup();
1761 }
1762
1763 /*
1764 * called to add another store removal policy module
1765 */
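/*
 * Presumably each removal policy module registers itself at startup via this
 * hook, along the lines of storeReplAdd("lru", createRemovalPolicy_lru);
 * see the repl/ modules for the actual registrations.
 */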
1766 void
1767 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1768 {
1769 int i;
1770
1771 /* find the number of currently known repl types */
1772 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1773 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1774 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1775 return;
1776 }
1777 }
1778
1779 /* add the new type */
1780 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1781
1782 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1783
1784 storerepl_list[i].typestr = type;
1785
1786 storerepl_list[i].create = create;
1787 }
1788
1789 /*
1790 * Create a removal policy instance
1791 */
1792 RemovalPolicy *
1793 createRemovalPolicy(RemovalPolicySettings * settings)
1794 {
1795 storerepl_entry_t *r;
1796
1797 for (r = storerepl_list; r && r->typestr; ++r) {
1798 if (strcmp(r->typestr, settings->type) == 0)
1799 return r->create(settings->args);
1800 }
1801
1802 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1803 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1804 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1805 fatalf("ERROR: Unknown policy %s\n", settings->type);
1806 return NULL; /* NOTREACHED */
1807 }
1808
1809 #if 0
1810 void
1811 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1812 {
1813 if (e->swap_file_number == filn)
1814 return;
1815
1816 if (filn < 0) {
1817 assert(-1 == filn);
1818 storeDirMapBitReset(e->swap_file_number);
1819 storeDirLRUDelete(e);
1820 e->swap_file_number = -1;
1821 } else {
1822 assert(-1 == e->swap_file_number);
1823 storeDirMapBitSet(e->swap_file_number = filn);
1824 storeDirLRUAdd(e);
1825 }
1826 }
1827
1828 #endif
1829
1830 /*
1831 * Replace a store entry with
1832 * a new reply. This eats the reply.
1833 */
1834 void
1835 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1836 {
1837 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1838
1839 if (!mem_obj) {
1840 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1841 return;
1842 }
1843
1844 mem_obj->replaceHttpReply(rep);
1845
1846 if (andStartWriting)
1847 startWriting();
1848 }
1849
1850 void
1851 StoreEntry::startWriting()
1852 {
1853 Packer p;
1854
1855 /* TODO: when we store headers separately remove the header portion */
1856 /* TODO: mark the length of the headers ? */
1857 /* We ONLY want the headers */
1858 packerToStoreInit(&p, this);
1859
1860 assert (isEmpty());
1861 assert(mem_obj);
1862
1863 const HttpReply *rep = getReply();
1864 assert(rep);
1865
1866 rep->packHeadersInto(&p);
1867 mem_obj->markEndOfReplyHeaders();
1868
1869 rep->body.packInto(&p);
1870
1871 packerClean(&p);
1872 }
1873
1874 char const *
1875 StoreEntry::getSerialisedMetaData()
1876 {
1877 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1878 int swap_hdr_sz;
1879 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1880 storeSwapTLVFree(tlv_list);
1881 assert (swap_hdr_sz >= 0);
1882 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1883 return result;
1884 }
1885
1886 void
1887 StoreEntry::trimMemory(const bool preserveSwappable)
1888 {
1889 /*
1890 * DPW 2007-05-09
1891 * Bug #1943. We must not let go of any data for IN_MEMORY
1892 * objects. We have to wait until the mem_status changes.
1893 */
1894 if (mem_status == IN_MEMORY)
1895 return;
1896
1897 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1898 return; // cannot trim because we do not load them again
1899
1900 if (!preserveSwappable) {
1901 if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
1902 /* Nothing to do */
1903 return;
1904 }
1905 /*
1906 * It's not swap-able, and we're about to delete a chunk,
1907 * so we must make it PRIVATE. This is tricky/ugly because
1908 * for the most part, we treat swapable == cachable here.
1909 */
1910 releaseRequest();
1911 mem_obj->trimUnSwappable ();
1912 } else {
1913 mem_obj->trimSwappable ();
1914 }
1915 }
1916
1917 bool
1918 StoreEntry::modifiedSince(HttpRequest * request) const
1919 {
1920 int object_length;
1921 time_t mod_time = lastmod;
1922
1923 if (mod_time < 0)
1924 mod_time = timestamp;
1925
1926 debugs(88, 3, "modifiedSince: '" << url() << "'");
1927
1928 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1929
1930 if (mod_time < 0)
1931 return true;
1932
1933 /* Find size of the object */
1934 object_length = getReply()->content_length;
1935
1936 if (object_length < 0)
1937 object_length = contentLen();
1938
1939 if (mod_time > request->ims) {
1940 debugs(88, 3, "--> YES: entry newer than client");
1941 return true;
1942 } else if (mod_time < request->ims) {
1943 debugs(88, 3, "--> NO: entry older than client");
1944 return false;
1945 } else if (request->imslen < 0) {
1946 debugs(88, 3, "--> NO: same LMT, no client length");
1947 return false;
1948 } else if (request->imslen == object_length) {
1949 debugs(88, 3, "--> NO: same LMT, same length");
1950 return false;
1951 } else {
1952 debugs(88, 3, "--> YES: same LMT, different length");
1953 return true;
1954 }
1955 }
1956
1957 bool
1958 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1959 {
1960 const String reqETags = request.header.getList(HDR_IF_MATCH);
1961 return hasOneOfEtags(reqETags, false);
1962 }
1963
1964 bool
1965 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1966 {
1967 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1968 // weak comparison is allowed only for HEAD or full-body GET requests
1969 const bool allowWeakMatch = !request.flags.range &&
1970 (request.method == METHOD_GET || request.method == METHOD_HEAD);
1971 return hasOneOfEtags(reqETags, allowWeakMatch);
1972 }
1973
1974 /// whether at least one of the request ETags matches entity ETag
1975 bool
1976 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1977 {
1978 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1979 if (!repETag.str)
1980 return strListIsMember(&reqETags, "*", ',');
1981
1982 bool matched = false;
1983 const char *pos = NULL;
1984 const char *item;
1985 int ilen;
1986 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1987 if (!strncmp(item, "*", ilen))
1988 matched = true;
1989 else {
1990 String str;
1991 str.append(item, ilen);
1992 ETag reqETag;
1993 if (etagParseInit(&reqETag, str.termedBuf())) {
1994 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1995 etagIsStrongEqual(repETag, reqETag);
1996 }
1997 }
1998 }
1999 return matched;
2000 }
2001
2002 SwapDir::Pointer
2003 StoreEntry::store() const
2004 {
2005 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2006 return INDEXSD(swap_dirn);
2007 }
2008
2009 void
2010 StoreEntry::unlink()
2011 {
2012 store()->unlink(*this); // implies disconnect()
2013 swap_filen = -1;
2014 swap_dirn = -1;
2015 swap_status = SWAPOUT_NONE;
2016 }
2017
2018 /*
2019 * return true if the entry is in a state where
2020 * it can accept more data (i.e. with the write() method)
2021 */
2022 bool
2023 StoreEntry::isAccepting() const
2024 {
2025 if (STORE_PENDING != store_status)
2026 return false;
2027
2028 if (EBIT_TEST(flags, ENTRY_ABORTED))
2029 return false;
2030
2031 return true;
2032 }
2033
2034 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2035 {
2036 return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
2037 e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
2038 e.swap_status;
2039 }
2040
2041 /* NullStoreEntry */
2042
2043 NullStoreEntry NullStoreEntry::_instance;
2044
2045 NullStoreEntry *
2046 NullStoreEntry::getInstance()
2047 {
2048 return &_instance;
2049 }
2050
2051 char const *
2052 NullStoreEntry::getMD5Text() const
2053 {
2054 return "N/A";
2055 }
2056
2057 void
2058 NullStoreEntry::operator delete(void*)
2059 {
2060 fatal ("Attempt to delete NullStoreEntry\n");
2061 }
2062
2063 char const *
2064 NullStoreEntry::getSerialisedMetaData()
2065 {
2066 return NULL;
2067 }
2068
2069 #if !_USE_INLINE_
2070 #include "Store.cci"
2071 #endif