1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "CacheDigest.h"
38 #include "CacheManager.h"
39 #include "comm/Connection.h"
40 #include "ETag.h"
41 #include "event.h"
42 #include "fde.h"
43 #include "http.h"
44 #include "HttpReply.h"
45 #include "HttpRequest.h"
46 #include "mem_node.h"
47 #include "MemObject.h"
48 #include "mgr/Registration.h"
49 #include "mgr/StoreIoAction.h"
50 #include "profiler/Profiler.h"
51 #include "protos.h"
52 #include "repl_modules.h"
53 #include "SquidTime.h"
54 #include "Stack.h"
55 #include "StatCounters.h"
56 #include "stmem.h"
57 #include "StoreClient.h"
58 #include "Store.h"
59 #include "store_key_md5.h"
60 #include "StoreIOState.h"
61 #include "StoreMeta.h"
62 #include "StrList.h"
63 #include "store_key_md5.h"
64 #include "SwapDir.h"
65 #include "swap_log_op.h"
66 #if USE_DELAY_POOLS
67 #include "DelayPools.h"
68 #endif
69 #if HAVE_LIMITS_H
70 #include <limits.h>
71 #endif
72
73 static STMCB storeWriteComplete;
74
75 #define REBUILD_TIMESTAMP_DELTA_MAX 2
76
77 #define STORE_IN_MEM_BUCKETS (229)
78
79 /** \todo Convert these string constants to generated enum string-arrays */
80
81 const char *memStatusStr[] = {
82 "NOT_IN_MEMORY",
83 "IN_MEMORY"
84 };
85
86 const char *pingStatusStr[] = {
87 "PING_NONE",
88 "PING_WAITING",
89 "PING_DONE"
90 };
91
92 const char *storeStatusStr[] = {
93 "STORE_OK",
94 "STORE_PENDING"
95 };
96
97 const char *swapStatusStr[] = {
98 "SWAPOUT_NONE",
99 "SWAPOUT_WRITING",
100 "SWAPOUT_DONE"
101 };
102
103 /*
104 * This defines a repl type
105 */
106
107 typedef struct _storerepl_entry storerepl_entry_t;
108
109 struct _storerepl_entry {
110 const char *typestr;
111 REMOVALPOLICYCREATE *create;
112 };
113
114 static storerepl_entry_t *storerepl_list = NULL;
115
116 /*
117 * local function prototypes
118 */
119 static int getKeyCounter(void);
120 static OBJH storeCheckCachableStats;
121 static EVH storeLateRelease;
122
123 /*
124 * local variables
125 */
126 static Stack<StoreEntry*> LateReleaseStack;
127 MemAllocator *StoreEntry::pool = NULL;
128
129 StorePointer Store::CurrentRoot = NULL;
130
131 void
132 Store::Root(Store * aRoot)
133 {
134 CurrentRoot = aRoot;
135 }
136
137 void
138 Store::Root(StorePointer aRoot)
139 {
140 Root(aRoot.getRaw());
141 }
142
143 void
144 Store::Stats(StoreEntry * output)
145 {
146 assert (output);
147 Root().stat(*output);
148 }
149
150 void
151 Store::create()
152 {}
153
154 void
155 Store::diskFull()
156 {}
157
158 void
159 Store::sync()
160 {}
161
162 void
163 Store::unlink (StoreEntry &anEntry)
164 {
165 fatal("Store::unlink on invalid Store\n");
166 }
167
168 void *
169 StoreEntry::operator new (size_t bytecount)
170 {
171 assert (bytecount == sizeof (StoreEntry));
172
173 if (!pool) {
174 pool = memPoolCreate ("StoreEntry", bytecount);
175 pool->setChunkSize(2048 * 1024);
176 }
177
178 return pool->alloc();
179 }
180
181 void
182 StoreEntry::operator delete (void *address)
183 {
184 pool->freeOne(address);
185 }
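
/*
 * Usage sketch for the pooled allocator above; nothing beyond the calls
 * already visible in this file is assumed. The first "new StoreEntry"
 * creates the pool with 2 MB chunks, "delete" hands the object back to the
 * pool rather than to malloc, and StoreEntry::inUseCount() (defined below)
 * reports the live-object count from that same pool:
 *
 *   StoreEntry *e = new StoreEntry();
 *   const size_t live = StoreEntry::inUseCount();  // >= 1 while e is alive
 *   delete e;
 */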
186
187 void
188 StoreEntry::makePublic()
189 {
190 /* This object can be cached for a long time */
191
192 if (EBIT_TEST(flags, ENTRY_CACHABLE))
193 setPublicKey();
194 }
195
196 void
197 StoreEntry::makePrivate()
198 {
199 /* This object should never be cached at all */
200 expireNow();
201 releaseRequest(); /* delete object when not used */
202 /* releaseRequest clears ENTRY_CACHABLE flag */
203 }
204
205 void
206 StoreEntry::cacheNegatively()
207 {
208 /* This object may be negatively cached */
209 negativeCache();
210
211 if (EBIT_TEST(flags, ENTRY_CACHABLE))
212 setPublicKey();
213 }
214
215 size_t
216 StoreEntry::inUseCount()
217 {
218 if (!pool)
219 return 0;
220 return pool->getInUseCount();
221 }
222
223 const char *
224 StoreEntry::getMD5Text() const
225 {
226 return storeKeyText((const cache_key *)key);
227 }
228
229 #include "comm.h"
230
231 void
232 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
233 {
234 StoreEntry *anEntry = (StoreEntry *)theContext;
235 anEntry->delayAwareRead(aRead.conn,
236 aRead.buf,
237 aRead.len,
238 aRead.callback);
239 }
240
241 void
242 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
243 {
244 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
245 /* sketch: readdeferer* = getdeferer.
246 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
247 */
248
249 if (amountToRead == 0) {
250 assert (mem_obj);
251 /* read ahead limit */
252 /* Perhaps these two calls should both live in MemObject */
253 #if USE_DELAY_POOLS
254 if (!mem_obj->readAheadPolicyCanRead()) {
255 #endif
256 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
257 return;
258 #if USE_DELAY_POOLS
259 }
260
261 /* delay id limit */
262 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
263 return;
264
265 #endif
266
267 }
268
269 if (fd_table[conn->fd].closing()) {
270 // Readers must have closing callbacks if they want to be notified. No
271 // readers appeared to care around 2009/12/14 as they skipped reading
272 // for other reasons. Closing may already be true at the delayAwareRead
273 // call time or may happen while we wait after delayRead() above.
274 debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
275 callback);
276 return; // the read callback will never be called
277 }
278
279 comm_read(conn, buf, amountToRead, callback);
280 }
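
/*
 * Rough decision map for delayAwareRead() above (a summary, not new logic):
 *
 *   amountToRead == 0   -> a read-ahead or delay-pool limit was hit; the read
 *                          is parked on the MemObject and retried later
 *   conn is closing     -> the read is dropped and the callback never fires
 *   otherwise           -> comm_read(conn, buf, amountToRead, callback)
 */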
281
282 size_t
283 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
284 {
285 if (mem_obj == NULL)
286 return aRange.end;
287
288 #if URL_CHECKSUM_DEBUG
289
290 mem_obj->checkUrlChecksum();
291
292 #endif
293
294 if (!mem_obj->readAheadPolicyCanRead())
295 return 0;
296
297 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
298 }
299
300 bool
301 StoreEntry::checkDeferRead(int fd) const
302 {
303 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
304 }
305
306 void
307 StoreEntry::setNoDelay (bool const newValue)
308 {
309 if (mem_obj)
310 mem_obj->setNoDelay(newValue);
311 }
312
313 store_client_t
314 StoreEntry::storeClientType() const
315 {
316 /* The needed offset isn't in memory
317 * XXX TODO: this is wrong for range requests
318 * as the needed offset may *not* be 0, AND
319 * offset 0 in the memory object is the HTTP headers.
320 */
321
322 if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
323 // clients of an object cached in shared memory are memory clients
324 return STORE_MEM_CLIENT;
325 }
326
327 assert(mem_obj);
328
329 if (mem_obj->inmem_lo)
330 return STORE_DISK_CLIENT;
331
332 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
333 /* I don't think we should be adding clients to aborted entries */
334 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
335 return STORE_MEM_CLIENT;
336 }
337
338 if (store_status == STORE_OK) {
339 /* the object has completed. */
340
341 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
342 if (swap_status == SWAPOUT_DONE) {
343 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
344 if (mem_obj->endOffset() == mem_obj->object_sz) {
345 /* hot object fully swapped in */
346 return STORE_MEM_CLIENT;
347 }
348 } else {
349 /* Memory-only, or currently being swapped out */
350 return STORE_MEM_CLIENT;
351 }
352 }
353 return STORE_DISK_CLIENT;
354 }
355
356 /* here and past, entry is STORE_PENDING */
357 /*
358 * If this is the first client, let it be the mem client
359 */
360 if (mem_obj->nclients == 1)
361 return STORE_MEM_CLIENT;
362
363 /*
364 * If there is no disk file to open yet, we must make this a
365 * mem client. If we can't open the swapin file before writing
366 * to the client, there is no guarantee that we will be able
367 * to open it later when we really need it.
368 */
369 if (swap_status == SWAPOUT_NONE)
370 return STORE_MEM_CLIENT;
371
372 /*
373 * otherwise, make subsequent clients read from disk so they
374 * can not delay the first, and vice-versa.
375 */
376 return STORE_DISK_CLIENT;
377 }
378
379 StoreEntry::StoreEntry():
380 hidden_mem_obj(NULL),
381 swap_file_sz(0)
382 {
383 debugs(20, 3, HERE << "new StoreEntry " << this);
384 mem_obj = NULL;
385
386 expires = lastmod = lastref = timestamp = -1;
387
388 swap_status = SWAPOUT_NONE;
389 swap_filen = -1;
390 swap_dirn = -1;
391 }
392
393 StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
394 hidden_mem_obj(NULL),
395 swap_file_sz(0)
396 {
397 debugs(20, 3, HERE << "new StoreEntry " << this);
398 mem_obj = new MemObject(aUrl, aLogUrl);
399
400 expires = lastmod = lastref = timestamp = -1;
401
402 swap_status = SWAPOUT_NONE;
403 swap_filen = -1;
404 swap_dirn = -1;
405 }
406
407 StoreEntry::~StoreEntry()
408 {
409 if (swap_filen >= 0) {
410 SwapDir &sd = dynamic_cast<SwapDir&>(*store());
411 sd.disconnect(*this);
412 }
413 delete hidden_mem_obj;
414 }
415
416 #if USE_ADAPTATION
417 void
418 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
419 {
420 if (!deferredProducer)
421 deferredProducer = producer;
422 else
423 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
424 *deferredProducer << ", requested call: " << *producer);
425 }
426
427 void
428 StoreEntry::kickProducer()
429 {
430 if (deferredProducer != NULL) {
431 ScheduleCallHere(deferredProducer);
432 deferredProducer = NULL;
433 }
434 }
435 #endif
436
437 void
438 StoreEntry::destroyMemObject()
439 {
440 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
441 setMemStatus(NOT_IN_MEMORY);
442 MemObject *mem = mem_obj;
443 mem_obj = NULL;
444 delete mem;
445 delete hidden_mem_obj;
446 hidden_mem_obj = NULL;
447 }
448
449 void
450 StoreEntry::hideMemObject()
451 {
452 debugs(20, 3, HERE << "hiding " << mem_obj);
453 assert(mem_obj);
454 assert(!hidden_mem_obj);
455 hidden_mem_obj = mem_obj;
456 mem_obj = NULL;
457 }
458
459 void
460 destroyStoreEntry(void *data)
461 {
462 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
463 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
464 assert(e != NULL);
465
466 if (e == NullStoreEntry::getInstance())
467 return;
468
469 e->destroyMemObject();
470
471 e->hashDelete();
472
473 assert(e->key == NULL);
474
475 delete e;
476 }
477
478 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
479
480 void
481 StoreEntry::hashInsert(const cache_key * someKey)
482 {
483 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
484 key = storeKeyDup(someKey);
485 hash_join(store_table, this);
486 }
487
488 void
489 StoreEntry::hashDelete()
490 {
491 hash_remove_link(store_table, this);
492 storeKeyFree((const cache_key *)key);
493 key = NULL;
494 }
495
496 /* -------------------------------------------------------------------------- */
497
498 /* get rid of memory copy of the object */
499 void
500 StoreEntry::purgeMem()
501 {
502 if (mem_obj == NULL)
503 return;
504
505 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
506
507 destroyMemObject();
508
509 if (swap_status != SWAPOUT_DONE)
510 release();
511 }
512
513 /* RBC 20050104 this is wrong- memory ref counting
514 * is not at all equivalent to the store 'usage' concept
515 * which the replacement policies should be acting upon.
516 * specifically, object iteration within stores needs
517 * memory ref counting to prevent race conditions,
518 * but this should not influence store replacement.
519 */
520 void
521
522 StoreEntry::lock()
523 {
524 ++lock_count;
525 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
526 lock_count );
527 lastref = squid_curtime;
528 Store::Root().reference(*this);
529 }
530
531 void
532 StoreEntry::setReleaseFlag()
533 {
534 if (EBIT_TEST(flags, RELEASE_REQUEST))
535 return;
536
537 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
538
539 EBIT_SET(flags, RELEASE_REQUEST);
540 }
541
542 void
543 StoreEntry::releaseRequest()
544 {
545 if (EBIT_TEST(flags, RELEASE_REQUEST))
546 return;
547
548 setReleaseFlag();
549
550 /*
551 * Clear cachable flag here because we might get called before
552 * anyone else even looks at the cachability flag. Also, this
553 * prevents httpMakePublic from really setting a public key.
554 */
555 EBIT_CLR(flags, ENTRY_CACHABLE);
556
557 setPrivateKey();
558 }
559
560 /* unlock object; returns the remaining lock_count, or 0 once the last
561 * lock is dropped (the entry may then be released) */
562 int
563 StoreEntry::unlock()
564 {
565 --lock_count;
566 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
567
568 if (lock_count)
569 return (int) lock_count;
570
571 if (store_status == STORE_PENDING)
572 setReleaseFlag();
573
574 assert(storePendingNClients(this) == 0);
575
576 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
577 this->release();
578 return 0;
579 }
580
581 if (EBIT_TEST(flags, KEY_PRIVATE))
582 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
583
584 Store::Root().handleIdleEntry(*this); // may delete us
585 return 0;
586 }
587
588 void
589 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
590 {
591 assert (aClient);
592 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
593
594 if (!result)
595 aClient->created (NullStoreEntry::getInstance());
596 else
597 aClient->created (result);
598 }
599
600 void
601 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
602 {
603 assert (aClient);
604 StoreEntry *result = storeGetPublicByRequest (request);
605
606 if (!result)
607 result = NullStoreEntry::getInstance();
608
609 aClient->created (result);
610 }
611
612 void
613 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
614 {
615 assert (aClient);
616 StoreEntry *result = storeGetPublic (uri, method);
617
618 if (!result)
619 result = NullStoreEntry::getInstance();
620
621 aClient->created (result);
622 }
623
624 StoreEntry *
625 storeGetPublic(const char *uri, const HttpRequestMethod& method)
626 {
627 return Store::Root().get(storeKeyPublic(uri, method));
628 }
629
630 StoreEntry *
631 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
632 {
633 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
634 }
635
636 StoreEntry *
637 storeGetPublicByRequest(HttpRequest * req)
638 {
639 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
640
641 if (e == NULL && req->method == METHOD_HEAD)
642 /* We can generate a HEAD reply from a cached GET object */
643 e = storeGetPublicByRequestMethod(req, METHOD_GET);
644
645 return e;
646 }
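
/*
 * Lookup sketch, assuming an absolute URI string is at hand: a public hit
 * comes back as a StoreEntry pointer, a miss as NULL (callers going through
 * the StoreClient methods above receive NullStoreEntry instead):
 *
 *   if (StoreEntry *hit = storeGetPublic("http://example.com/", METHOD_GET))
 *       debugs(20, 3, "found " << hit->getMD5Text());
 */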
647
648 static int
649 getKeyCounter(void)
650 {
651 static int key_counter = 0;
652
653 if (++key_counter < 0)
654 key_counter = 1;
655
656 return key_counter;
657 }
658
659 /* RBC 20050104 AFAICT this should become simpler:
660 * rather than reinserting with a special key it should be marked
661 * as 'released' and then cleaned up when refcounting indicates.
662 * the StoreHashIndex could well implement its 'released' in the
663 * current manner.
664 * Also, clean log writing should skip over it.
665 * Otherwise, we need a 'remove from the index but not the store'
666 * concept.
667 */
668 void
669 StoreEntry::setPrivateKey()
670 {
671 const cache_key *newkey;
672
673 if (key && EBIT_TEST(flags, KEY_PRIVATE))
674 return; /* is already private */
675
676 if (key) {
677 if (swap_filen > -1)
678 storeDirSwapLog(this, SWAP_LOG_DEL);
679
680 hashDelete();
681 }
682
683 if (mem_obj != NULL) {
684 mem_obj->id = getKeyCounter();
685 newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
686 } else {
687 newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
688 }
689
690 assert(hash_lookup(store_table, newkey) == NULL);
691 EBIT_SET(flags, KEY_PRIVATE);
692 hashInsert(newkey);
693 }
694
695 void
696 StoreEntry::setPublicKey()
697 {
698 StoreEntry *e2 = NULL;
699 const cache_key *newkey;
700
701 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
702 return; /* is already public */
703
704 assert(mem_obj);
705
706 /*
707 * We can't make RELEASE_REQUEST objects public. Depending on
708 * when RELEASE_REQUEST gets set, we might not be swapping out
709 * the object. If we're not swapping out, then subsequent
710 * store clients won't be able to access object data which has
711 * been freed from memory.
712 *
713 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
714 * be set, and StoreEntry::setPublicKey() should not be called.
715 */
716 #if MORE_DEBUG_OUTPUT
717
718 if (EBIT_TEST(flags, RELEASE_REQUEST))
719 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
720
721 #endif
722
723 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
724
725 if (mem_obj->request) {
726 HttpRequest *request = mem_obj->request;
727
728 if (!mem_obj->vary_headers) {
729 /* First handle the case where the object no longer varies */
730 safe_free(request->vary_headers);
731 } else {
732 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
733 /* Oops.. the variance has changed. Kill the base object
734 * to record the new variance key
735 */
736 safe_free(request->vary_headers); /* free old "bad" variance key */
737 StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);
738
739 if (pe)
740 pe->release();
741 }
742
743 /* Make sure the request knows the variance status */
744 if (!request->vary_headers) {
745 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
746
747 if (vary)
748 request->vary_headers = xstrdup(vary);
749 }
750 }
751
752 // TODO: storeGetPublic() calls below may create unlocked entries.
753 // We should add/use storeHas() API or lock/unlock those entries.
754 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
755 /* Create "vary" base object */
756 String vary;
757 StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
758 /* We are allowed to do this typecast */
759 HttpReply *rep = new HttpReply;
760 rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
761 vary = mem_obj->getReply()->header.getList(HDR_VARY);
762
763 if (vary.size()) {
764 /* Again, we own this structure layout */
765 rep->header.putStr(HDR_VARY, vary.termedBuf());
766 vary.clean();
767 }
768
769 #if X_ACCELERATOR_VARY
770 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
771
772 if (vary.defined()) {
773 /* Again, we own this structure layout */
774 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
775 vary.clean();
776 }
777
778 #endif
779 pe->replaceHttpReply(rep);
780
781 pe->timestampsSet();
782
783 pe->makePublic();
784
785 pe->complete();
786
787 pe->unlock();
788 }
789
790 newkey = storeKeyPublicByRequest(mem_obj->request);
791 } else
792 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
793
794 if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
795 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
796 e2->setPrivateKey();
797 e2->release();
798
799 if (mem_obj->request)
800 newkey = storeKeyPublicByRequest(mem_obj->request);
801 else
802 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
803 }
804
805 if (key)
806 hashDelete();
807
808 EBIT_CLR(flags, KEY_PRIVATE);
809
810 hashInsert(newkey);
811
812 if (swap_filen > -1)
813 storeDirSwapLog(this, SWAP_LOG_ADD);
814 }
815
816 StoreEntry *
817 storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
818 {
819 StoreEntry *e = NULL;
820 MemObject *mem = NULL;
821 debugs(20, 3, "storeCreateEntry: '" << url << "'");
822
823 e = new StoreEntry(url, log_url);
824 e->lock_count = 1; /* Note lock here w/o calling storeLock() */
825 mem = e->mem_obj;
826 mem->method = method;
827
828 if (neighbors_do_private_keys || !flags.hierarchical)
829 e->setPrivateKey();
830 else
831 e->setPublicKey();
832
833 if (flags.cachable) {
834 EBIT_SET(e->flags, ENTRY_CACHABLE);
835 EBIT_CLR(e->flags, RELEASE_REQUEST);
836 } else {
837 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
838 e->releaseRequest();
839 }
840
841 e->store_status = STORE_PENDING;
842 e->setMemStatus(NOT_IN_MEMORY);
843 e->refcount = 0;
844 e->lastref = squid_curtime;
845 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
846 e->ping_status = PING_NONE;
847 EBIT_SET(e->flags, ENTRY_VALIDATED);
848 return e;
849 }
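
/*
 * Caller-side sketch for storeCreateEntry(); the request_flags and method
 * values are assumed to come from the client request being served:
 *
 *   StoreEntry *e = storeCreateEntry(url, log_url, request->flags, request->method);
 *   // e starts locked (lock_count == 1) and STORE_PENDING; the caller must
 *   // eventually complete() or abort() it and then unlock() it.
 */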
850
851 /* Mark object as expired */
852 void
853 StoreEntry::expireNow()
854 {
855 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
856 expires = squid_curtime;
857 }
858
859 void
860 storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
861 {
862 PROF_start(storeWriteComplete);
863 StoreEntry *e = (StoreEntry *)data;
864
865 if (EBIT_TEST(e->flags, DELAY_SENDING)) {
866 PROF_stop(storeWriteComplete);
867 return;
868 }
869
870 e->invokeHandlers();
871 PROF_stop(storeWriteComplete);
872 }
873
874 void
875 StoreEntry::write (StoreIOBuffer writeBuffer)
876 {
877 assert(mem_obj != NULL);
878 /* This assert will change when we teach the store to update */
879 PROF_start(StoreEntry_write);
880 assert(store_status == STORE_PENDING);
881
882 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
883 PROF_stop(StoreEntry_write);
884 storeGetMemSpace(writeBuffer.length);
885 mem_obj->write (writeBuffer, storeWriteComplete, this);
886 }
887
888 /* Append incoming data from a primary server to an entry. */
889 void
890 StoreEntry::append(char const *buf, int len)
891 {
892 assert(mem_obj != NULL);
893 assert(len >= 0);
894 assert(store_status == STORE_PENDING);
895
896 StoreIOBuffer tempBuffer;
897 tempBuffer.data = (char *)buf;
898 tempBuffer.length = len;
899 /*
900 * XXX sigh, offset might be < 0 here, but it gets "corrected"
901 * later. This offset crap is such a mess.
902 */
903 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
904 write(tempBuffer);
905 }
906
907 void
908 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
909 {
910 va_list args;
911 va_start(args, fmt);
912
913 storeAppendVPrintf(e, fmt, args);
914 va_end(args);
915 }
916
917 /* used by storeAppendPrintf and Packer */
918 void
919 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
920 {
921 LOCAL_ARRAY(char, buf, 4096);
922 buf[0] = '\0';
923 vsnprintf(buf, 4096, fmt, vargs);
924 e->append(buf, strlen(buf));
925 }
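
/*
 * Typical use, as in the cache manager reports below (e.g.
 * storeCheckCachableStats): format text straight into a STORE_PENDING entry.
 *
 *   storeAppendPrintf(sentry, "%s\t%d\n", "some.counter", 42);
 *
 * Note the 4096-byte LOCAL_ARRAY above: a single formatted line longer than
 * that is silently truncated by vsnprintf().
 */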
926
927 struct _store_check_cachable_hist {
928
929 struct {
930 int non_get;
931 int not_entry_cachable;
932 int wrong_content_length;
933 int negative_cached;
934 int too_big;
935 int too_small;
936 int private_key;
937 int too_many_open_files;
938 int too_many_open_fds;
939 } no;
940
941 struct {
942 int Default;
943 } yes;
944 } store_check_cachable_hist;
945
946 int
947 storeTooManyDiskFilesOpen(void)
948 {
949 if (Config.max_open_disk_fds == 0)
950 return 0;
951
952 if (store_open_disk_fd > Config.max_open_disk_fds)
953 return 1;
954
955 return 0;
956 }
957
958 int
959 StoreEntry::checkTooSmall()
960 {
961 if (EBIT_TEST(flags, ENTRY_SPECIAL))
962 return 0;
963
964 if (STORE_OK == store_status)
965 if (mem_obj->object_sz < 0 ||
966 mem_obj->object_sz < Config.Store.minObjectSize)
967 return 1;
968 if (getReply()->content_length > -1)
969 if (getReply()->content_length < Config.Store.minObjectSize)
970 return 1;
971 return 0;
972 }
973
974 // TODO: remove checks already performed by swapoutPossible()
975 // TODO: move "too many open..." checks outside -- we are called too early/late
976 int
977 StoreEntry::checkCachable()
978 {
979 #if CACHE_ALL_METHODS
980
981 if (mem_obj->method != METHOD_GET) {
982 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
983 ++store_check_cachable_hist.no.non_get;
984 } else
985 #endif
986 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
987 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
988 ++store_check_cachable_hist.no.wrong_content_length;
989 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
990 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
991 ++store_check_cachable_hist.no.not_entry_cachable;
992 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
993 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
994 ++store_check_cachable_hist.no.negative_cached;
995 return 0; /* avoid release call below */
996 } else if ((getReply()->content_length > 0 &&
997 getReply()->content_length
998 > Config.Store.maxObjectSize) ||
999 mem_obj->endOffset() > Config.Store.maxObjectSize) {
1000 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1001 ++store_check_cachable_hist.no.too_big;
1002 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
1003 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1004 ++store_check_cachable_hist.no.too_big;
1005 } else if (checkTooSmall()) {
1006 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1007 ++store_check_cachable_hist.no.too_small;
1008 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1009 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1010 ++store_check_cachable_hist.no.private_key;
1011 } else if (swap_status != SWAPOUT_NONE) {
1012 /*
1013 * here we check the swap_status because the remaining
1014 * cases are relevant only if we haven't started swapping
1015 * out the object yet.
1016 */
1017 return 1;
1018 } else if (storeTooManyDiskFilesOpen()) {
1019 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1020 ++store_check_cachable_hist.no.too_many_open_files;
1021 } else if (fdNFree() < RESERVED_FD) {
1022 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1023 ++store_check_cachable_hist.no.too_many_open_fds;
1024 } else {
1025 ++store_check_cachable_hist.yes.Default;
1026 return 1;
1027 }
1028
1029 releaseRequest();
1030 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1031 return 0;
1032 }
1033
1034 void
1035 storeCheckCachableStats(StoreEntry *sentry)
1036 {
1037 storeAppendPrintf(sentry, "Category\t Count\n");
1038
1039 #if CACHE_ALL_METHODS
1040
1041 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1042 store_check_cachable_hist.no.non_get);
1043 #endif
1044
1045 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1046 store_check_cachable_hist.no.not_entry_cachable);
1047 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1048 store_check_cachable_hist.no.wrong_content_length);
1049 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1050 store_check_cachable_hist.no.negative_cached);
1051 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1052 store_check_cachable_hist.no.too_big);
1053 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1054 store_check_cachable_hist.no.too_small);
1055 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1056 store_check_cachable_hist.no.private_key);
1057 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1058 store_check_cachable_hist.no.too_many_open_files);
1059 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1060 store_check_cachable_hist.no.too_many_open_fds);
1061 storeAppendPrintf(sentry, "yes.default\t%d\n",
1062 store_check_cachable_hist.yes.Default);
1063 }
1064
1065 void
1066 StoreEntry::complete()
1067 {
1068 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1069
1070 if (store_status != STORE_PENDING) {
1071 /*
1072 * if we're not STORE_PENDING, then probably we got aborted
1073 * and there should be NO clients on this entry
1074 */
1075 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1076 assert(mem_obj->nclients == 0);
1077 return;
1078 }
1079
1080 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1081 * in use of object_sz?
1082 */
1083 mem_obj->object_sz = mem_obj->endOffset();
1084
1085 store_status = STORE_OK;
1086
1087 assert(mem_status == NOT_IN_MEMORY);
1088
1089 if (!validLength()) {
1090 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1091 releaseRequest();
1092 }
1093
1094 #if USE_CACHE_DIGESTS
1095 if (mem_obj->request)
1096 mem_obj->request->hier.store_complete_stop = current_time;
1097
1098 #endif
1099 /*
1100 * We used to call invokeHandlers, then storeSwapOut. However,
1101 * Madhukar Reddy <myreddy@persistence.com> reported that
1102 * responses without content length would sometimes get released
1103 * in client_side, thinking that the response is incomplete.
1104 */
1105 invokeHandlers();
1106 }
1107
1108 /*
1109 * Someone wants to abort this transfer. Set the reason in the
1110 * request structure, call the server-side callback and mark the
1111 * entry for releasing
1112 */
1113 void
1114 StoreEntry::abort()
1115 {
1116 ++statCounter.aborted_requests;
1117 assert(store_status == STORE_PENDING);
1118 assert(mem_obj != NULL);
1119 debugs(20, 6, "storeAbort: " << getMD5Text());
1120
1121 lock(); /* lock while aborting */
1122 negativeCache();
1123
1124 releaseRequest();
1125
1126 EBIT_SET(flags, ENTRY_ABORTED);
1127
1128 setMemStatus(NOT_IN_MEMORY);
1129
1130 store_status = STORE_OK;
1131
1132 /* Notify the server side */
1133
1134 /*
1135 * DPW 2007-05-07
1136 * Should we check abort.data for validity?
1137 */
1138 if (mem_obj->abort.callback) {
1139 if (!cbdataReferenceValid(mem_obj->abort.data))
1140 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
1141 eventAdd("mem_obj->abort.callback",
1142 mem_obj->abort.callback,
1143 mem_obj->abort.data,
1144 0.0,
1145 true);
1146 unregisterAbort();
1147 }
1148
1149 /* XXX Should we reverse these two, so that there is no
1150 * unneeded disk swapping triggered?
1151 */
1152 /* Notify the client side */
1153 invokeHandlers();
1154
1155 // abort swap out, invalidating what was created so far (release follows)
1156 swapOutFileClose(StoreIOState::writerGone);
1157
1158 unlock(); /* unlock */
1159 }
1160
1161 /**
1162 * Clear Memory storage to accommodate the given object len
1163 */
1164 void
1165 storeGetMemSpace(int size)
1166 {
1167 PROF_start(storeGetMemSpace);
1168 StoreEntry *e = NULL;
1169 int released = 0;
1170 static time_t last_check = 0;
1171 size_t pages_needed;
1172 RemovalPurgeWalker *walker;
1173
1174 if (squid_curtime == last_check) {
1175 PROF_stop(storeGetMemSpace);
1176 return;
1177 }
1178
1179 last_check = squid_curtime;
1180
1181 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1182
1183 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1184 PROF_stop(storeGetMemSpace);
1185 return;
1186 }
1187
1188 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1189 " pages");
1190
1191 /* XXX what to set as max_scan here? */
1192 walker = mem_policy->PurgeInit(mem_policy, 100000);
1193
1194 while ((e = walker->Next(walker))) {
1195 e->purgeMem();
1196 ++released;
1197
1198 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1199 break;
1200 }
1201
1202 walker->Done(walker);
1203 debugs(20, 3, "storeGetMemSpace stats:");
1204 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1205 debugs(20, 3, " " << std::setw(6) << released << " were released");
1206 PROF_stop(storeGetMemSpace);
1207 }
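
/*
 * Example of the pages_needed arithmetic above, assuming the usual 4 KB
 * SM_PAGE_SIZE: a 10000-byte write needs (10000 + 4095) / 4096 = 3 pages.
 * Purging only starts once mem_node::InUseCount() + pages_needed reaches
 * store_pages_max, and the walk runs at most once per second (last_check).
 */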
1208
1209 /* thunk through to Store::Root().maintain(). Note that this would be better still
1210 * if registered against the root store itself, but that requires more complex
1211 * update logic - bigger fish to fry first. Long term each store when
1212 * it becomes active will self register
1213 */
1214 void
1215 Store::Maintain(void *notused)
1216 {
1217 Store::Root().maintain();
1218
1219 /* Reregister a maintain event .. */
1220 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1221
1222 }
1223
1224 /* The maximum objects to scan for maintain storage space */
1225 #define MAINTAIN_MAX_SCAN 1024
1226 #define MAINTAIN_MAX_REMOVE 64
1227
1228 /*
1229 * This routine is to be called by the main loop in main.c.
1230 * It removes expired objects from only one bucket each time it is called.
1231 *
1232 * This should get called 1/s from main().
1233 */
1234 void
1235 StoreController::maintain()
1236 {
1237 static time_t last_warn_time = 0;
1238
1239 PROF_start(storeMaintainSwapSpace);
1240 swapDir->maintain();
1241
1242 /* this should be emitted by the oversize dir, not globally */
1243
1244 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1245 if (squid_curtime - last_warn_time > 10) {
1246 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1247 << Store::Root().currentSize() / 1024.0 << " KB > "
1248 << (Store::Root().maxSize() >> 10) << " KB");
1249 last_warn_time = squid_curtime;
1250 }
1251 }
1252
1253 PROF_stop(storeMaintainSwapSpace);
1254 }
1255
1256 /* release an object from a cache */
1257 void
1258 StoreEntry::release()
1259 {
1260 PROF_start(storeRelease);
1261 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1262 /* If, for any reason we can't discard this object because of an
1263 * outstanding request, mark it for pending release */
1264
1265 if (locked()) {
1266 expireNow();
1267 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1268 releaseRequest();
1269 PROF_stop(storeRelease);
1270 return;
1271 }
1272
1273 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1274 setPrivateKey();
1275
1276 if (mem_obj)
1277 destroyMemObject();
1278
1279 if (swap_filen > -1) {
1280 /*
1281 * Fake a call to StoreEntry->lock(). When rebuilding is done,
1282 * we'll just call StoreEntry->unlock() on these.
1283 */
1284 ++lock_count;
1285 setReleaseFlag();
1286 LateReleaseStack.push_back(this);
1287 } else {
1288 destroyStoreEntry(static_cast<hash_link *>(this));
1289 // "this" is no longer valid
1290 }
1291
1292 PROF_stop(storeRelease);
1293 return;
1294 }
1295
1296 storeLog(STORE_LOG_RELEASE, this);
1297
1298 if (swap_filen > -1) {
1299 // log before unlink() below clears swap_filen
1300 if (!EBIT_TEST(flags, KEY_PRIVATE))
1301 storeDirSwapLog(this, SWAP_LOG_DEL);
1302
1303 unlink();
1304 }
1305
1306 setMemStatus(NOT_IN_MEMORY);
1307 destroyStoreEntry(static_cast<hash_link *>(this));
1308 PROF_stop(storeRelease);
1309 }
1310
1311 static void
1312 storeLateRelease(void *unused)
1313 {
1314 StoreEntry *e;
1315 int i;
1316 static int n = 0;
1317
1318 if (StoreController::store_dirs_rebuilding) {
1319 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1320 return;
1321 }
1322
1323 for (i = 0; i < 10; ++i) {
1324 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1325
1326 if (e == NULL) {
1327 /* done! */
1328 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1329 return;
1330 }
1331
1332 e->unlock();
1333 ++n;
1334 }
1335
1336 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1337 }
1338
1339 /* return 1 if a store entry is locked */
1340 int
1341 StoreEntry::locked() const
1342 {
1343 if (lock_count)
1344 return 1;
1345
1346 if (swap_status == SWAPOUT_WRITING)
1347 return 1;
1348
1349 if (store_status == STORE_PENDING)
1350 return 1;
1351
1352 /*
1353 * SPECIAL, PUBLIC entries should be "locked"
1354 */
1355 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1356 if (!EBIT_TEST(flags, KEY_PRIVATE))
1357 return 1;
1358
1359 return 0;
1360 }
1361
1362 bool
1363 StoreEntry::validLength() const
1364 {
1365 int64_t diff;
1366 const HttpReply *reply;
1367 assert(mem_obj != NULL);
1368 reply = getReply();
1369 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1370 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1371 objectLen());
1372 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1373 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1374
1375 if (reply->content_length < 0) {
1376 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1377 return 1;
1378 }
1379
1380 if (reply->hdr_sz == 0) {
1381 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1382 return 1;
1383 }
1384
1385 if (mem_obj->method == METHOD_HEAD) {
1386 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1387 return 1;
1388 }
1389
1390 if (reply->sline.status == HTTP_NOT_MODIFIED)
1391 return 1;
1392
1393 if (reply->sline.status == HTTP_NO_CONTENT)
1394 return 1;
1395
1396 diff = reply->hdr_sz + reply->content_length - objectLen();
1397
1398 if (diff == 0)
1399 return 1;
1400
1401 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1402
1403 return 0;
1404 }
1405
1406 static void
1407 storeRegisterWithCacheManager(void)
1408 {
1409 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1410 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1411 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1412 storeCheckCachableStats, 0, 1);
1413 }
1414
1415 void
1416 storeInit(void)
1417 {
1418 storeKeyInit();
1419 mem_policy = createRemovalPolicy(Config.memPolicy);
1420 storeDigestInit();
1421 storeLogOpen();
1422 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1423 Store::Root().init();
1424 storeRebuildStart();
1425
1426 storeRegisterWithCacheManager();
1427 }
1428
1429 void
1430 storeConfigure(void)
1431 {
1432 store_swap_high = (long) (((float) Store::Root().maxSize() *
1433 (float) Config.Swap.highWaterMark) / (float) 100);
1434 store_swap_low = (long) (((float) Store::Root().maxSize() *
1435 (float) Config.Swap.lowWaterMark) / (float) 100);
1436 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1437 }
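
/*
 * Worked example for the watermark math above: with the stock
 * cache_swap_high 95 and cache_swap_low 90, store_swap_high and
 * store_swap_low end up at 95% and 90% of Store::Root().maxSize();
 * store_pages_max is the cache_mem budget (Config.memMaxSize) divided by
 * the size of one in-memory page node (sizeof(mem_node)).
 */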
1438
1439 bool
1440 StoreEntry::memoryCachable() const
1441 {
1442 if (mem_obj == NULL)
1443 return 0;
1444
1445 if (mem_obj->data_hdr.size() == 0)
1446 return 0;
1447
1448 if (mem_obj->inmem_lo != 0)
1449 return 0;
1450
1451 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1452 return 0;
1453
1454 return 1;
1455 }
1456
1457 int
1458 StoreEntry::checkNegativeHit() const
1459 {
1460 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1461 return 0;
1462
1463 if (expires <= squid_curtime)
1464 return 0;
1465
1466 if (store_status != STORE_OK)
1467 return 0;
1468
1469 return 1;
1470 }
1471
1472 /**
1473 * Set object for negative caching.
1474 * Preserves any expiry information given by the server.
1475 * In the absence of proper expiry info it will be set to expire immediately,
1476 * or, with HTTP violations enabled, the configured negative TTL is observed.
1477 */
1478 void
1479 StoreEntry::negativeCache()
1480 {
1481 // XXX: should make the default for expires 0 instead of -1
1482 // so we can distinguish "Expires: -1" from nothing.
1483 if (expires <= 0)
1484 #if USE_HTTP_VIOLATIONS
1485 expires = squid_curtime + Config.negativeTtl;
1486 #else
1487 expires = squid_curtime;
1488 #endif
1489 EBIT_SET(flags, ENTRY_NEGCACHED);
1490 }
1491
1492 void
1493 storeFreeMemory(void)
1494 {
1495 Store::Root(NULL);
1496 #if USE_CACHE_DIGESTS
1497
1498 if (store_digest)
1499 cacheDigestDestroy(store_digest);
1500
1501 #endif
1502
1503 store_digest = NULL;
1504 }
1505
1506 int
1507 expiresMoreThan(time_t expires, time_t when)
1508 {
1509 if (expires < 0) /* No Expires given */
1510 return 1;
1511
1512 return (expires > (squid_curtime + when));
1513 }
1514
1515 int
1516 StoreEntry::validToSend() const
1517 {
1518 if (EBIT_TEST(flags, RELEASE_REQUEST))
1519 return 0;
1520
1521 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1522 if (expires <= squid_curtime)
1523 return 0;
1524
1525 if (EBIT_TEST(flags, ENTRY_ABORTED))
1526 return 0;
1527
1528 return 1;
1529 }
1530
1531 void
1532 StoreEntry::timestampsSet()
1533 {
1534 const HttpReply *reply = getReply();
1535 time_t served_date = reply->date;
1536 int age = reply->header.getInt(HDR_AGE);
1537 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1538 /* make sure that 0 <= served_date <= squid_curtime */
1539
1540 if (served_date < 0 || served_date > squid_curtime)
1541 served_date = squid_curtime;
1542
1543 /* Bug 1791:
1544 * If the returned Date: is more than 24 hours older than
1545 * the squid_curtime, then one of us needs to use NTP to set our
1546 * clock. We'll pretend that our clock is right.
1547 */
1548 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1549 served_date = squid_curtime;
1550
1551 /*
1552 * Compensate with Age header if origin server clock is ahead
1553 * of us and there is a cache in between us and the origin
1554 * server. But DONT compensate if the age value is larger than
1555 * squid_curtime because it results in a negative served_date.
1556 */
1557 if (age > squid_curtime - served_date)
1558 if (squid_curtime > age)
1559 served_date = squid_curtime - age;
1560
1561 // compensate for Squid-to-server and server-to-Squid delays
1562 if (mem_obj && mem_obj->request) {
1563 const time_t request_sent =
1564 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1565 if (0 < request_sent && request_sent < squid_curtime)
1566 served_date -= (squid_curtime - request_sent);
1567 }
1568
1569 if (reply->expires > 0 && reply->date > -1)
1570 expires = served_date + (reply->expires - reply->date);
1571 else
1572 expires = reply->expires;
1573
1574 lastmod = reply->last_modified;
1575
1576 timestamp = served_date;
1577 }
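
/*
 * Worked example for the clock/Age compensation above: if the reply's Date:
 * is 60s ahead of squid_curtime, it carries "Age: 100", and Expires: is 300s
 * past Date:, then served_date is first clamped to squid_curtime, then moved
 * back to squid_curtime - 100, so the entry expires 200s from now rather
 * than 300s.
 */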
1578
1579 void
1580 StoreEntry::registerAbort(STABH * cb, void *data)
1581 {
1582 assert(mem_obj);
1583 assert(mem_obj->abort.callback == NULL);
1584 mem_obj->abort.callback = cb;
1585 mem_obj->abort.data = cbdataReference(data);
1586 }
1587
1588 void
1589 StoreEntry::unregisterAbort()
1590 {
1591 assert(mem_obj);
1592 if (mem_obj->abort.callback) {
1593 mem_obj->abort.callback = NULL;
1594 cbdataReferenceDone(mem_obj->abort.data);
1595 }
1596 }
1597
1598 void
1599 StoreEntry::dump(int l) const
1600 {
1601 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1602 debugs(20, l, "StoreEntry->next: " << next);
1603 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1604 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1605 debugs(20, l, "StoreEntry->lastref: " << lastref);
1606 debugs(20, l, "StoreEntry->expires: " << expires);
1607 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1608 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1609 debugs(20, l, "StoreEntry->refcount: " << refcount);
1610 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1611 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1612 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1613 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1614 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1615 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1616 debugs(20, l, "StoreEntry->store_status: " << store_status);
1617 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1618 }
1619
1620 /*
1621 * NOTE, this function assumes only two mem states
1622 */
1623 void
1624 StoreEntry::setMemStatus(mem_status_t new_status)
1625 {
1626 if (new_status == mem_status)
1627 return;
1628
1629 // are we using a shared memory cache?
1630 if (Config.memShared && IamWorkerProcess()) {
1631 // enumerate calling cases if shared memory is enabled
1632 assert(new_status != IN_MEMORY || EBIT_TEST(flags, ENTRY_SPECIAL));
1633 // This method was designed to update replacement policy, not to
1634 // actually purge something from the memory cache (TODO: rename?).
1635 // Shared memory cache does not have a policy that needs updates.
1636 mem_status = new_status;
1637 return;
1638 }
1639
1640 assert(mem_obj != NULL);
1641
1642 if (new_status == IN_MEMORY) {
1643 assert(mem_obj->inmem_lo == 0);
1644
1645 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1646 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1647 } else {
1648 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1649 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1650 }
1651
1652 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1653 } else {
1654 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1655 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1656 } else {
1657 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1658 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1659 }
1660
1661 --hot_obj_count;
1662 }
1663
1664 mem_status = new_status;
1665 }
1666
1667 const char *
1668 StoreEntry::url() const
1669 {
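    // Note: the NULL test below relies on callers invoking url() through a
    // null StoreEntry pointer; formally undefined behaviour, tolerated here
    // so such callers get "[null_entry]" instead of crashing.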
1670 if (this == NULL)
1671 return "[null_entry]";
1672 else if (mem_obj == NULL)
1673 return "[null_mem_obj]";
1674 else
1675 return mem_obj->url;
1676 }
1677
1678 void
1679 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1680 {
1681 if (mem_obj)
1682 return;
1683
1684 if (hidden_mem_obj) {
1685 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1686 mem_obj = hidden_mem_obj;
1687 hidden_mem_obj = NULL;
1688 mem_obj->resetUrls(aUrl, aLogUrl);
1689 return;
1690 }
1691
1692 mem_obj = new MemObject(aUrl, aLogUrl);
1693 }
1694
1695 /* this just sets DELAY_SENDING */
1696 void
1697 StoreEntry::buffer()
1698 {
1699 EBIT_SET(flags, DELAY_SENDING);
1700 }
1701
1702 /* this just clears DELAY_SENDING and Invokes the handlers */
1703 void
1704 StoreEntry::flush()
1705 {
1706 if (EBIT_TEST(flags, DELAY_SENDING)) {
1707 EBIT_CLR(flags, DELAY_SENDING);
1708 invokeHandlers();
1709 }
1710 }
1711
1712 int64_t
1713 StoreEntry::objectLen() const
1714 {
1715 assert(mem_obj != NULL);
1716 return mem_obj->object_sz;
1717 }
1718
1719 int64_t
1720 StoreEntry::contentLen() const
1721 {
1722 assert(mem_obj != NULL);
1723 assert(getReply() != NULL);
1724 return objectLen() - getReply()->hdr_sz;
1725 }
1726
1727 HttpReply const *
1728 StoreEntry::getReply () const
1729 {
1730 if (NULL == mem_obj)
1731 return NULL;
1732
1733 return mem_obj->getReply();
1734 }
1735
1736 void
1737 StoreEntry::reset()
1738 {
1739 assert (mem_obj);
1740 debugs(20, 3, "StoreEntry::reset: " << url());
1741 mem_obj->reset();
1742 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1743 rep->reset();
1744 expires = lastmod = timestamp = -1;
1745 }
1746
1747 /*
1748 * storeFsInit
1749 *
1750 * This routine calls the SETUP routine for each fs type.
1751 * I don't know where the best place for this is, and I'm not going to shuffle
1752 * around large chunks of code right now (that can be done once it's working).
1753 */
1754 void
1755 storeFsInit(void)
1756 {
1757 storeReplSetup();
1758 }
1759
1760 /*
1761 * called to add another store removal policy module
1762 */
1763 void
1764 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1765 {
1766 int i;
1767
1768 /* find the number of currently known repl types */
1769 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1770 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1771 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1772 return;
1773 }
1774 }
1775
1776 /* add the new type */
1777 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1778
1779 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1780
1781 storerepl_list[i].typestr = type;
1782
1783 storerepl_list[i].create = create;
1784 }
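
/*
 * Registration sketch, assuming a hypothetical policy module "my_policy"
 * with a REMOVALPOLICYCREATE-compatible factory; the real modules (lru,
 * heap) do the equivalent from their setup hooks run by storeReplSetup():
 *
 *   static REMOVALPOLICYCREATE createMyPolicy;
 *   storeReplAdd("my_policy", createMyPolicy);
 *
 * createRemovalPolicy() below then matches the type string configured via
 * cache_replacement_policy / memory_replacement_policy against this list.
 */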
1785
1786 /*
1787 * Create a removal policy instance
1788 */
1789 RemovalPolicy *
1790 createRemovalPolicy(RemovalPolicySettings * settings)
1791 {
1792 storerepl_entry_t *r;
1793
1794 for (r = storerepl_list; r && r->typestr; ++r) {
1795 if (strcmp(r->typestr, settings->type) == 0)
1796 return r->create(settings->args);
1797 }
1798
1799 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1800 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1801 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1802 fatalf("ERROR: Unknown policy %s\n", settings->type);
1803 return NULL; /* NOTREACHED */
1804 }
1805
1806 #if 0
1807 void
1808 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1809 {
1810 if (e->swap_file_number == filn)
1811 return;
1812
1813 if (filn < 0) {
1814 assert(-1 == filn);
1815 storeDirMapBitReset(e->swap_file_number);
1816 storeDirLRUDelete(e);
1817 e->swap_file_number = -1;
1818 } else {
1819 assert(-1 == e->swap_file_number);
1820 storeDirMapBitSet(e->swap_file_number = filn);
1821 storeDirLRUAdd(e);
1822 }
1823 }
1824
1825 #endif
1826
1827 /*
1828 * Replace a store entry with
1829 * a new reply. This eats the reply.
1830 */
1831 void
1832 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1833 {
1834 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1835
1836 if (!mem_obj) {
1837 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1838 return;
1839 }
1840
1841 mem_obj->replaceHttpReply(rep);
1842
1843 if (andStartWriting)
1844 startWriting();
1845 }
1846
1847 void
1848 StoreEntry::startWriting()
1849 {
1850 Packer p;
1851
1852 /* TODO: when we store headers separately, remove the header portion */
1853 /* TODO: mark the length of the headers ? */
1854 /* We ONLY want the headers */
1855 packerToStoreInit(&p, this);
1856
1857 assert (isEmpty());
1858 assert(mem_obj);
1859
1860 const HttpReply *rep = getReply();
1861 assert(rep);
1862
1863 rep->packHeadersInto(&p);
1864 mem_obj->markEndOfReplyHeaders();
1865
1866 rep->body.packInto(&p);
1867
1868 packerClean(&p);
1869 }
1870
1871 char const *
1872 StoreEntry::getSerialisedMetaData()
1873 {
1874 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1875 int swap_hdr_sz;
1876 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1877 storeSwapTLVFree(tlv_list);
1878 assert (swap_hdr_sz >= 0);
1879 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1880 return result;
1881 }
1882
1883 void
1884 StoreEntry::trimMemory(const bool preserveSwappable)
1885 {
1886 /*
1887 * DPW 2007-05-09
1888 * Bug #1943. We must not let go any data for IN_MEMORY
1889 * objects. We have to wait until the mem_status changes.
1890 */
1891 if (mem_status == IN_MEMORY)
1892 return;
1893
1894 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1895 return; // cannot trim because we do not load them again
1896
1897 if (!preserveSwappable) {
1898 if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
1899 /* Nothing to do */
1900 return;
1901 }
1902 /*
1903 * It's not swappable, and we're about to delete a chunk,
1904 * so we must make it PRIVATE. This is tricky/ugly because
1905 * for the most part, we treat swappable == cachable here.
1906 */
1907 releaseRequest();
1908 mem_obj->trimUnSwappable ();
1909 } else {
1910 mem_obj->trimSwappable ();
1911 }
1912 }
1913
1914 bool
1915 StoreEntry::modifiedSince(HttpRequest * request) const
1916 {
1917 int object_length;
1918 time_t mod_time = lastmod;
1919
1920 if (mod_time < 0)
1921 mod_time = timestamp;
1922
1923 debugs(88, 3, "modifiedSince: '" << url() << "'");
1924
1925 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1926
1927 if (mod_time < 0)
1928 return true;
1929
1930 /* Find size of the object */
1931 object_length = getReply()->content_length;
1932
1933 if (object_length < 0)
1934 object_length = contentLen();
1935
1936 if (mod_time > request->ims) {
1937 debugs(88, 3, "--> YES: entry newer than client");
1938 return true;
1939 } else if (mod_time < request->ims) {
1940 debugs(88, 3, "--> NO: entry older than client");
1941 return false;
1942 } else if (request->imslen < 0) {
1943 debugs(88, 3, "--> NO: same LMT, no client length");
1944 return false;
1945 } else if (request->imslen == object_length) {
1946 debugs(88, 3, "--> NO: same LMT, same length");
1947 return false;
1948 } else {
1949 debugs(88, 3, "--> YES: same LMT, different length");
1950 return true;
1951 }
1952 }
1953
1954 bool
1955 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1956 {
1957 const String reqETags = request.header.getList(HDR_IF_MATCH);
1958 return hasOneOfEtags(reqETags, false);
1959 }
1960
1961 bool
1962 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1963 {
1964 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1965 // weak comparison is allowed only for HEAD or full-body GET requests
1966 const bool allowWeakMatch = !request.flags.range &&
1967 (request.method == METHOD_GET || request.method == METHOD_HEAD);
1968 return hasOneOfEtags(reqETags, allowWeakMatch);
1969 }
1970
1971 /// whether at least one of the request ETags matches entity ETag
1972 bool
1973 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1974 {
1975 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1976 if (!repETag.str)
1977 return strListIsMember(&reqETags, "*", ',');
1978
1979 bool matched = false;
1980 const char *pos = NULL;
1981 const char *item;
1982 int ilen;
1983 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1984 if (!strncmp(item, "*", ilen))
1985 matched = true;
1986 else {
1987 String str;
1988 str.append(item, ilen);
1989 ETag reqETag;
1990 if (etagParseInit(&reqETag, str.termedBuf())) {
1991 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1992 etagIsStrongEqual(repETag, reqETag);
1993 }
1994 }
1995 }
1996 return matched;
1997 }
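
/*
 * Matching sketch for the loop above: with a stored ETag of W/"v2", an
 * If-None-Match list of  "v1", W/"v2"  matches only when allowWeakMatch is
 * true (HEAD or full-body GET), while a list consisting of just  *  matches
 * unconditionally, even if the reply carries no ETag at all.
 */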
1998
1999 SwapDir::Pointer
2000 StoreEntry::store() const
2001 {
2002 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2003 return INDEXSD(swap_dirn);
2004 }
2005
2006 void
2007 StoreEntry::unlink()
2008 {
2009 store()->unlink(*this); // implies disconnect()
2010 swap_filen = -1;
2011 swap_dirn = -1;
2012 swap_status = SWAPOUT_NONE;
2013 }
2014
2015 /*
2016 * return true if the entry is in a state where
2017 * it can accept more data (ie with write() method)
2018 */
2019 bool
2020 StoreEntry::isAccepting() const
2021 {
2022 if (STORE_PENDING != store_status)
2023 return false;
2024
2025 if (EBIT_TEST(flags, ENTRY_ABORTED))
2026 return false;
2027
2028 return true;
2029 }
2030
2031 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2032 {
2033 return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
2034 e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
2035 e.swap_status;
2036 }
2037
2038 /* NullStoreEntry */
2039
2040 NullStoreEntry NullStoreEntry::_instance;
2041
2042 NullStoreEntry *
2043 NullStoreEntry::getInstance()
2044 {
2045 return &_instance;
2046 }
2047
2048 char const *
2049 NullStoreEntry::getMD5Text() const
2050 {
2051 return "N/A";
2052 }
2053
2054 void
2055 NullStoreEntry::operator delete(void*)
2056 {
2057 fatal ("Attempt to delete NullStoreEntry\n");
2058 }
2059
2060 char const *
2061 NullStoreEntry::getSerialisedMetaData()
2062 {
2063 return NULL;
2064 }
2065
2066 #if !_USE_INLINE_
2067 #include "Store.cci"
2068 #endif