1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "CacheDigest.h"
36 #include "CacheManager.h"
37 #include "comm/Connection.h"
38 #include "ETag.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "globals.h"
42 #include "http.h"
43 #include "HttpReply.h"
44 #include "HttpRequest.h"
45 #include "mem_node.h"
46 #include "MemObject.h"
47 #include "mgr/Registration.h"
48 #include "mgr/StoreIoAction.h"
49 #include "profiler/Profiler.h"
50 #include "repl_modules.h"
51 #include "RequestFlags.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "Stack.h"
55 #include "StatCounters.h"
56 #include "stmem.h"
57 #include "store_digest.h"
58 #include "store_key_md5.h"
60 #include "store_log.h"
61 #include "store_rebuild.h"
62 #include "Store.h"
63 #include "StoreClient.h"
64 #include "StoreIOState.h"
65 #include "StoreMeta.h"
66 #include "StrList.h"
67 #include "swap_log_op.h"
68 #include "SwapDir.h"
69 #include "tools.h"
70 #if USE_DELAY_POOLS
71 #include "DelayPools.h"
72 #endif
73 #if HAVE_LIMITS_H
74 #include <limits.h>
75 #endif
76
77 static STMCB storeWriteComplete;
78
79 #define REBUILD_TIMESTAMP_DELTA_MAX 2
80
81 #define STORE_IN_MEM_BUCKETS (229)
82
83 /** \todo Convert these string constants to generated enum string-arrays */
84
85 const char *memStatusStr[] = {
86 "NOT_IN_MEMORY",
87 "IN_MEMORY"
88 };
89
90 const char *pingStatusStr[] = {
91 "PING_NONE",
92 "PING_WAITING",
93 "PING_DONE"
94 };
95
96 const char *storeStatusStr[] = {
97 "STORE_OK",
98 "STORE_PENDING"
99 };
100
101 const char *swapStatusStr[] = {
102 "SWAPOUT_NONE",
103 "SWAPOUT_WRITING",
104 "SWAPOUT_DONE"
105 };
106
107 /*
108 * This defines a removal-policy (repl) type
109 */
110
111 typedef struct _storerepl_entry storerepl_entry_t;
112
113 struct _storerepl_entry {
114 const char *typestr;
115 REMOVALPOLICYCREATE *create;
116 };
117
118 static storerepl_entry_t *storerepl_list = NULL;
119
120 /*
121 * local function prototypes
122 */
123 static int getKeyCounter(void);
124 static OBJH storeCheckCachableStats;
125 static EVH storeLateRelease;
126
127 /*
128 * local variables
129 */
130 static Stack<StoreEntry*> LateReleaseStack;
131 MemAllocator *StoreEntry::pool = NULL;
132
133 StorePointer Store::CurrentRoot = NULL;
134
135 void
136 Store::Root(Store * aRoot)
137 {
138 CurrentRoot = aRoot;
139 }
140
141 void
142 Store::Root(StorePointer aRoot)
143 {
144 Root(aRoot.getRaw());
145 }
146
147 void
148 Store::Stats(StoreEntry * output)
149 {
150 assert (output);
151 Root().stat(*output);
152 }
153
154 void
155 Store::create()
156 {}
157
158 void
159 Store::diskFull()
160 {}
161
162 void
163 Store::sync()
164 {}
165
166 void
167 Store::unlink (StoreEntry &anEntry)
168 {
169 fatal("Store::unlink on invalid Store\n");
170 }
171
172 void *
173 StoreEntry::operator new (size_t bytecount)
174 {
175 assert (bytecount == sizeof (StoreEntry));
176
177 if (!pool) {
178 pool = memPoolCreate ("StoreEntry", bytecount);
179 pool->setChunkSize(2048 * 1024);
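// 2048 * 1024 bytes = 2 MiB per pool chunk; StoreEntry objects are carved
// out of these chunks rather than allocated individually.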
180 }
181
182 return pool->alloc();
183 }
184
185 void
186 StoreEntry::operator delete (void *address)
187 {
188 pool->freeOne(address);
189 }
190
191 void
192 StoreEntry::makePublic()
193 {
194 /* This object can be cached for a long time */
195
196 if (EBIT_TEST(flags, ENTRY_CACHABLE))
197 setPublicKey();
198 }
199
200 void
201 StoreEntry::makePrivate()
202 {
203 /* This object should never be cached at all */
204 expireNow();
205 releaseRequest(); /* delete object when not used */
206 /* releaseRequest clears ENTRY_CACHABLE flag */
207 }
208
209 void
210 StoreEntry::cacheNegatively()
211 {
212 /* This object may be negatively cached */
213 negativeCache();
214
215 if (EBIT_TEST(flags, ENTRY_CACHABLE))
216 setPublicKey();
217 }
218
219 size_t
220 StoreEntry::inUseCount()
221 {
222 if (!pool)
223 return 0;
224 return pool->getInUseCount();
225 }
226
227 const char *
228 StoreEntry::getMD5Text() const
229 {
230 return storeKeyText((const cache_key *)key);
231 }
232
233 #include "comm.h"
234
235 void
236 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
237 {
238 StoreEntry *anEntry = (StoreEntry *)theContext;
239 anEntry->delayAwareRead(aRead.conn,
240 aRead.buf,
241 aRead.len,
242 aRead.callback);
243 }
244
245 void
246 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
247 {
248 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
249 /* sketch: readdeferer* = getdeferer.
250 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
251 */
252
253 if (amountToRead == 0) {
254 assert (mem_obj);
255 /* read ahead limit */
256 /* Perhaps these two calls should both live in MemObject */
257 #if USE_DELAY_POOLS
258 if (!mem_obj->readAheadPolicyCanRead()) {
259 #endif
260 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
261 return;
262 #if USE_DELAY_POOLS
263 }
264
265 /* delay id limit */
266 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
267 return;
268
269 #endif
270
271 }
272
273 if (fd_table[conn->fd].closing()) {
274 // Readers must have closing callbacks if they want to be notified. No
275 // readers appeared to care around 2009/12/14 as they skipped reading
276 // for other reasons. Closing may already be true at the delayAwareRead
277 // call time or may happen while we wait after delayRead() above.
278 debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
279 callback);
280 return; // the read callback will never be called
281 }
282
283 comm_read(conn, buf, amountToRead, callback);
284 }
285
286 size_t
287 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
288 {
289 if (mem_obj == NULL)
290 return aRange.end;
291
292 #if URL_CHECKSUM_DEBUG
293
294 mem_obj->checkUrlChecksum();
295
296 #endif
297
298 if (!mem_obj->readAheadPolicyCanRead())
299 return 0;
300
301 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
302 }
303
304 bool
305 StoreEntry::checkDeferRead(int fd) const
306 {
307 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
308 }
309
310 void
311 StoreEntry::setNoDelay (bool const newValue)
312 {
313 if (mem_obj)
314 mem_obj->setNoDelay(newValue);
315 }
316
317 store_client_t
318 StoreEntry::storeClientType() const
319 {
320 /* The needed offset isn't in memory
321 * XXX TODO: this is wrong for range requests
322 * as the needed offset may *not* be 0, AND
323 * offset 0 in the memory object is the HTTP headers.
324 */
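// In rough terms (summary of the checks below): shared-memory hits are
// memory clients; entries whose low in-memory offset has moved past zero
// are disk clients; for completed entries, clients read from memory while
// the whole object is still resident and from disk otherwise; the first
// client of a still-pending entry stays a memory client so it can drive
// the download, while later clients go to disk once a swapout has started.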
325
326 if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
327 // clients of an object cached in shared memory are memory clients
328 return STORE_MEM_CLIENT;
329 }
330
331 assert(mem_obj);
332
333 if (mem_obj->inmem_lo)
334 return STORE_DISK_CLIENT;
335
336 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
337 /* I don't think we should be adding clients to aborted entries */
338 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
339 return STORE_MEM_CLIENT;
340 }
341
342 if (store_status == STORE_OK) {
343 /* the object has completed. */
344
345 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
346 if (swap_status == SWAPOUT_DONE) {
347 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
348 if (mem_obj->endOffset() == mem_obj->object_sz) {
349 /* hot object fully swapped in */
350 return STORE_MEM_CLIENT;
351 }
352 } else {
353 /* Memory-only, or currently being swapped out */
354 return STORE_MEM_CLIENT;
355 }
356 }
357 return STORE_DISK_CLIENT;
358 }
359
360 /* here and past, entry is STORE_PENDING */
361 /*
362 * If this is the first client, let it be the mem client
363 */
364 if (mem_obj->nclients == 1)
365 return STORE_MEM_CLIENT;
366
367 /*
368 * If there is no disk file to open yet, we must make this a
369 * mem client. If we can't open the swapin file before writing
370 * to the client, there is no guarantee that we will be able
371 * to open it later when we really need it.
372 */
373 if (swap_status == SWAPOUT_NONE)
374 return STORE_MEM_CLIENT;
375
376 /*
377 * otherwise, make subsequent clients read from disk so they
378 * cannot delay the first, and vice versa.
379 */
380 return STORE_DISK_CLIENT;
381 }
382
383 StoreEntry::StoreEntry() :
384 mem_obj(NULL),
385 hidden_mem_obj(NULL),
386 timestamp(-1),
387 lastref(-1),
388 expires(-1),
389 lastmod(-1),
390 swap_file_sz(0),
391 refcount(0),
392 flags(0),
393 swap_filen(-1),
394 swap_dirn(-1),
395 lock_count(0),
396 mem_status(NOT_IN_MEMORY),
397 ping_status(PING_NONE),
398 store_status(STORE_PENDING),
399 swap_status(SWAPOUT_NONE)
400 {
401 debugs(20, 3, HERE << "new StoreEntry " << this);
402 }
403
404 StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl) :
405 mem_obj(NULL),
406 hidden_mem_obj(NULL),
407 timestamp(-1),
408 lastref(-1),
409 expires(-1),
410 lastmod(-1),
411 swap_file_sz(0),
412 refcount(0),
413 flags(0),
414 swap_filen(-1),
415 swap_dirn(-1),
416 lock_count(0),
417 mem_status(NOT_IN_MEMORY),
418 ping_status(PING_NONE),
419 store_status(STORE_PENDING),
420 swap_status(SWAPOUT_NONE)
421 {
422 debugs(20, 3, HERE << "new StoreEntry " << this);
423 mem_obj = new MemObject(aUrl, aLogUrl);
424 }
425
426 StoreEntry::~StoreEntry()
427 {
428 if (swap_filen >= 0) {
429 SwapDir &sd = dynamic_cast<SwapDir&>(*store());
430 sd.disconnect(*this);
431 }
432 delete hidden_mem_obj;
433 }
434
435 #if USE_ADAPTATION
436 void
437 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
438 {
439 if (!deferredProducer)
440 deferredProducer = producer;
441 else
442 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
443 *deferredProducer << ", requested call: " << *producer);
444 }
445
446 void
447 StoreEntry::kickProducer()
448 {
449 if (deferredProducer != NULL) {
450 ScheduleCallHere(deferredProducer);
451 deferredProducer = NULL;
452 }
453 }
454 #endif
455
456 void
457 StoreEntry::destroyMemObject()
458 {
459 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
460 setMemStatus(NOT_IN_MEMORY);
461 MemObject *mem = mem_obj;
462 mem_obj = NULL;
463 delete mem;
464 delete hidden_mem_obj;
465 hidden_mem_obj = NULL;
466 }
467
468 void
469 StoreEntry::hideMemObject()
470 {
471 debugs(20, 3, HERE << "hiding " << mem_obj);
472 assert(mem_obj);
473 assert(!hidden_mem_obj);
474 hidden_mem_obj = mem_obj;
475 mem_obj = NULL;
476 }
477
478 void
479 destroyStoreEntry(void *data)
480 {
481 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
482 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
483 assert(e != NULL);
484
485 if (e == NullStoreEntry::getInstance())
486 return;
487
488 e->destroyMemObject();
489
490 e->hashDelete();
491
492 assert(e->key == NULL);
493
494 delete e;
495 }
496
497 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
498
499 void
500 StoreEntry::hashInsert(const cache_key * someKey)
501 {
502 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
503 key = storeKeyDup(someKey);
504 hash_join(store_table, this);
505 }
506
507 void
508 StoreEntry::hashDelete()
509 {
510 hash_remove_link(store_table, this);
511 storeKeyFree((const cache_key *)key);
512 key = NULL;
513 }
514
515 /* -------------------------------------------------------------------------- */
516
517 /* get rid of memory copy of the object */
518 void
519 StoreEntry::purgeMem()
520 {
521 if (mem_obj == NULL)
522 return;
523
524 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
525
526 destroyMemObject();
527
528 if (swap_status != SWAPOUT_DONE)
529 release();
530 }
531
532 /* RBC 20050104 this is wrong: memory ref counting
533 * is not at all equivalent to the store 'usage' concept
534 * which the replacement policies should be acting upon.
535 * specifically, object iteration within stores needs
536 * memory ref counting to prevent race conditions,
537 * but this should not influence store replacement.
538 */
539 void
540 StoreEntry::lock()
542 {
543 ++lock_count;
544 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
545 lock_count );
546 lastref = squid_curtime;
547 Store::Root().reference(*this);
548 }
549
550 void
551 StoreEntry::setReleaseFlag()
552 {
553 if (EBIT_TEST(flags, RELEASE_REQUEST))
554 return;
555
556 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
557
558 EBIT_SET(flags, RELEASE_REQUEST);
559 }
560
561 void
562 StoreEntry::releaseRequest()
563 {
564 if (EBIT_TEST(flags, RELEASE_REQUEST))
565 return;
566
567 setReleaseFlag();
568
569 /*
570 * Clear cachable flag here because we might get called before
571 * anyone else even looks at the cachability flag. Also, this
572 * prevents httpMakePublic from really setting a public key.
573 */
574 EBIT_CLR(flags, ENTRY_CACHABLE);
575
576 setPrivateKey();
577 }
578
579 /* unlock object; returns the remaining lock_count, or 0 when the last
580 * lock is gone (the entry may then be released or handled as idle) */
581 int
582 StoreEntry::unlock()
583 {
584 --lock_count;
585 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
586
587 if (lock_count)
588 return (int) lock_count;
589
590 if (store_status == STORE_PENDING)
591 setReleaseFlag();
592
593 assert(storePendingNClients(this) == 0);
594
595 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
596 this->release();
597 return 0;
598 }
599
600 if (EBIT_TEST(flags, KEY_PRIVATE))
601 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
602
603 Store::Root().handleIdleEntry(*this); // may delete us
604 return 0;
605 }
606
607 void
608 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
609 {
610 assert (aClient);
611 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
612
613 if (!result)
614 aClient->created (NullStoreEntry::getInstance());
615 else
616 aClient->created (result);
617 }
618
619 void
620 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
621 {
622 assert (aClient);
623 StoreEntry *result = storeGetPublicByRequest (request);
624
625 if (!result)
626 result = NullStoreEntry::getInstance();
627
628 aClient->created (result);
629 }
630
631 void
632 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
633 {
634 assert (aClient);
635 StoreEntry *result = storeGetPublic (uri, method);
636
637 if (!result)
638 result = NullStoreEntry::getInstance();
639
640 aClient->created (result);
641 }
642
643 StoreEntry *
644 storeGetPublic(const char *uri, const HttpRequestMethod& method)
645 {
646 return Store::Root().get(storeKeyPublic(uri, method));
647 }
648
649 StoreEntry *
650 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
651 {
652 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
653 }
654
655 StoreEntry *
656 storeGetPublicByRequest(HttpRequest * req)
657 {
658 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
659
660 if (e == NULL && req->method == Http::METHOD_HEAD)
661 /* We can generate a HEAD reply from a cached GET object */
662 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
663
664 return e;
665 }
666
667 static int
668 getKeyCounter(void)
669 {
670 static int key_counter = 0;
671
672 if (++key_counter < 0)
673 key_counter = 1;
674
675 return key_counter;
676 }
677
678 /* RBC 20050104 AFAICT this should become simpler:
679 * rather than reinserting with a special key it should be marked
680 * as 'released' and then cleaned up when refcounting indicates.
681 * the StoreHashIndex could well implement its 'released' in the
682 * current manner.
683 * Also, clean log writing should skip over it.
684 * Otherwise, we need a 'remove from the index but not the store'
685 * concept.
686 */
687 void
688 StoreEntry::setPrivateKey()
689 {
690 const cache_key *newkey;
691
692 if (key && EBIT_TEST(flags, KEY_PRIVATE))
693 return; /* is already private */
694
695 if (key) {
696 if (swap_filen > -1)
697 storeDirSwapLog(this, SWAP_LOG_DEL);
698
699 hashDelete();
700 }
701
702 if (mem_obj != NULL) {
703 mem_obj->id = getKeyCounter();
704 newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
705 } else {
706 newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
707 }
708
709 assert(hash_lookup(store_table, newkey) == NULL);
710 EBIT_SET(flags, KEY_PRIVATE);
711 hashInsert(newkey);
712 }
713
714 void
715 StoreEntry::setPublicKey()
716 {
717 StoreEntry *e2 = NULL;
718 const cache_key *newkey;
719
720 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
721 return; /* is already public */
722
723 assert(mem_obj);
724
725 /*
726 * We can't make RELEASE_REQUEST objects public. Depending on
727 * when RELEASE_REQUEST gets set, we might not be swapping out
728 * the object. If we're not swapping out, then subsequent
729 * store clients won't be able to access object data which has
730 * been freed from memory.
731 *
732 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
733 * be set, and StoreEntry::setPublicKey() should not be called.
734 */
735 #if MORE_DEBUG_OUTPUT
736
737 if (EBIT_TEST(flags, RELEASE_REQUEST))
738 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
739
740 #endif
741
742 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
743
744 if (mem_obj->request) {
745 HttpRequest *request = mem_obj->request;
746
747 if (!mem_obj->vary_headers) {
748 /* First handle the case where the object no longer varies */
749 safe_free(request->vary_headers);
750 } else {
751 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
752 /* Oops.. the variance has changed. Kill the base object
753 * to record the new variance key
754 */
755 safe_free(request->vary_headers); /* free old "bad" variance key */
756 StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);
757
758 if (pe)
759 pe->release();
760 }
761
762 /* Make sure the request knows the variance status */
763 if (!request->vary_headers) {
764 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
765
766 if (vary)
767 request->vary_headers = xstrdup(vary);
768 }
769 }
770
771 // TODO: storeGetPublic() calls below may create unlocked entries.
772 // We should add/use storeHas() API or lock/unlock those entries.
773 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
774 /* Create "vary" base object */
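/* Illustrative sketch (an assumption about the resulting marker, not literal
 * code output): the base-URL entry ends up holding a tiny internal reply
 * along the lines of
 *   HTTP/1.1 200 Internal marker object
 *   Content-Type: x-squid-internal/vary
 *   Vary: Accept-Encoding
 * so later lookups on the plain URL learn which request headers select the
 * stored variants. */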
775 String vary;
776 StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
777 /* We are allowed to do this typecast */
778 HttpReply *rep = new HttpReply;
779 rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
780 vary = mem_obj->getReply()->header.getList(HDR_VARY);
781
782 if (vary.size()) {
783 /* Again, we own this structure layout */
784 rep->header.putStr(HDR_VARY, vary.termedBuf());
785 vary.clean();
786 }
787
788 #if X_ACCELERATOR_VARY
789 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
790
791 if (vary.defined()) {
792 /* Again, we own this structure layout */
793 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
794 vary.clean();
795 }
796
797 #endif
798 pe->replaceHttpReply(rep);
799
800 pe->timestampsSet();
801
802 pe->makePublic();
803
804 pe->complete();
805
806 pe->unlock();
807 }
808
809 newkey = storeKeyPublicByRequest(mem_obj->request);
810 } else
811 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
812
813 if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
814 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
815 e2->setPrivateKey();
816 e2->release();
817
818 if (mem_obj->request)
819 newkey = storeKeyPublicByRequest(mem_obj->request);
820 else
821 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
822 }
823
824 if (key)
825 hashDelete();
826
827 EBIT_CLR(flags, KEY_PRIVATE);
828
829 hashInsert(newkey);
830
831 if (swap_filen > -1)
832 storeDirSwapLog(this, SWAP_LOG_ADD);
833 }
834
835 StoreEntry *
836 storeCreateEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
837 {
838 StoreEntry *e = NULL;
839 MemObject *mem = NULL;
840 debugs(20, 3, "storeCreateEntry: '" << url << "'");
841
842 e = new StoreEntry(url, log_url);
843 e->lock_count = 1; /* Note lock here w/o calling storeLock() */
844 mem = e->mem_obj;
845 mem->method = method;
846
847 if (neighbors_do_private_keys || !flags.hierarchical)
848 e->setPrivateKey();
849 else
850 e->setPublicKey();
851
852 if (flags.cachable) {
853 EBIT_SET(e->flags, ENTRY_CACHABLE);
854 EBIT_CLR(e->flags, RELEASE_REQUEST);
855 } else {
856 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
857 e->releaseRequest();
858 }
859
860 e->store_status = STORE_PENDING;
861 e->setMemStatus(NOT_IN_MEMORY);
862 e->refcount = 0;
863 e->lastref = squid_curtime;
864 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
865 e->ping_status = PING_NONE;
866 EBIT_SET(e->flags, ENTRY_VALIDATED);
867 return e;
868 }
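/*
 * Typical caller sequence (illustrative sketch only; variable names are
 * hypothetical, and real callers add client registration, swapout, etc.):
 *
 *   StoreEntry *e = storeCreateEntry(url, logUrl, request->flags, request->method);
 *   e->replaceHttpReply(reply);   // install the parsed response headers
 *   e->append(body, bodyLen);     // stream body data into the entry
 *   e->complete();                // mark the object as fully received
 *   e->unlock();                  // drop the creation lock taken above
 */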
869
870 /* Mark object as expired */
871 void
872 StoreEntry::expireNow()
873 {
874 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
875 expires = squid_curtime;
876 }
877
878 void
879 storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
880 {
881 PROF_start(storeWriteComplete);
882 StoreEntry *e = (StoreEntry *)data;
883
884 if (EBIT_TEST(e->flags, DELAY_SENDING)) {
885 PROF_stop(storeWriteComplete);
886 return;
887 }
888
889 e->invokeHandlers();
890 PROF_stop(storeWriteComplete);
891 }
892
893 void
894 StoreEntry::write (StoreIOBuffer writeBuffer)
895 {
896 assert(mem_obj != NULL);
897 /* This assert will change when we teach the store to update */
898 PROF_start(StoreEntry_write);
899 assert(store_status == STORE_PENDING);
900
901 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
902 PROF_stop(StoreEntry_write);
903 storeGetMemSpace(writeBuffer.length);
904 mem_obj->write (writeBuffer, storeWriteComplete, this);
905 }
906
907 /* Append incoming data from a primary server to an entry. */
908 void
909 StoreEntry::append(char const *buf, int len)
910 {
911 assert(mem_obj != NULL);
912 assert(len >= 0);
913 assert(store_status == STORE_PENDING);
914
915 StoreIOBuffer tempBuffer;
916 tempBuffer.data = (char *)buf;
917 tempBuffer.length = len;
918 /*
919 * XXX sigh, offset might be < 0 here, but it gets "corrected"
920 * later. This offset crap is such a mess.
921 */
922 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
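/* e.g. (illustrative): with hdr_sz == 300 and 800 bytes (headers + body)
 * already stored, endOffset() is 800, so this append lands at body offset
 * 500; if fewer bytes than hdr_sz have been stored so far the result goes
 * negative, which is the "corrected later" case noted above. */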
923 write(tempBuffer);
924 }
925
926 void
927 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
928 {
929 va_list args;
930 va_start(args, fmt);
931
932 storeAppendVPrintf(e, fmt, args);
933 va_end(args);
934 }
935
936 /* used by storeAppendPrintf and Packer */
937 void
938 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
939 {
940 LOCAL_ARRAY(char, buf, 4096);
941 buf[0] = '\0';
942 vsnprintf(buf, 4096, fmt, vargs);
943 e->append(buf, strlen(buf));
944 }
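/*
 * Typical use (illustrative sketch; the handler name is hypothetical): cache
 * manager OBJH handlers build their reports with storeAppendPrintf, e.g.
 *
 *   static void
 *   myStatsPage(StoreEntry *sentry)
 *   {
 *       storeAppendPrintf(sentry, "StoreEntries in use\t%d\n",
 *                         (int) StoreEntry::inUseCount());
 *   }
 *
 * Note the 4 KB LOCAL_ARRAY above: a single formatted line longer than that
 * is silently truncated by vsnprintf.
 */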
945
946 struct _store_check_cachable_hist {
947
948 struct {
949 int non_get;
950 int not_entry_cachable;
951 int wrong_content_length;
952 int negative_cached;
953 int too_big;
954 int too_small;
955 int private_key;
956 int too_many_open_files;
957 int too_many_open_fds;
958 } no;
959
960 struct {
961 int Default;
962 } yes;
963 } store_check_cachable_hist;
964
965 int
966 storeTooManyDiskFilesOpen(void)
967 {
968 if (Config.max_open_disk_fds == 0)
969 return 0;
970
971 if (store_open_disk_fd > Config.max_open_disk_fds)
972 return 1;
973
974 return 0;
975 }
976
977 int
978 StoreEntry::checkTooSmall()
979 {
980 if (EBIT_TEST(flags, ENTRY_SPECIAL))
981 return 0;
982
983 if (STORE_OK == store_status)
984 if (mem_obj->object_sz < 0 ||
985 mem_obj->object_sz < Config.Store.minObjectSize)
986 return 1;
987 if (getReply()->content_length > -1)
988 if (getReply()->content_length < Config.Store.minObjectSize)
989 return 1;
990 return 0;
991 }
992
993 // TODO: remove checks already performed by swapoutPossible()
994 // TODO: move "too many open..." checks outside -- we are called too early/late
995 int
996 StoreEntry::checkCachable()
997 {
998 #if CACHE_ALL_METHODS
999
1000 if (mem_obj->method != Http::METHOD_GET) {
1001 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
1002 ++store_check_cachable_hist.no.non_get;
1003 } else
1004 #endif
1005 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
1006 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
1007 ++store_check_cachable_hist.no.wrong_content_length;
1008 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
1009 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
1010 ++store_check_cachable_hist.no.not_entry_cachable;
1011 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
1012 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
1013 ++store_check_cachable_hist.no.negative_cached;
1014 return 0; /* avoid release call below */
1015 } else if ((getReply()->content_length > 0 &&
1016 getReply()->content_length
1017 > Config.Store.maxObjectSize) ||
1018 mem_obj->endOffset() > Config.Store.maxObjectSize) {
1019 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1020 ++store_check_cachable_hist.no.too_big;
1021 } else if (checkTooSmall()) {
1022 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1023 ++store_check_cachable_hist.no.too_small;
1024 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1025 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1026 ++store_check_cachable_hist.no.private_key;
1027 } else if (swap_status != SWAPOUT_NONE) {
1028 /*
1029 * here we check the swap_status because the remaining
1030 * cases are relevant only if we haven't started swapping
1031 * out the object yet.
1032 */
1033 return 1;
1034 } else if (storeTooManyDiskFilesOpen()) {
1035 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1036 ++store_check_cachable_hist.no.too_many_open_files;
1037 } else if (fdNFree() < RESERVED_FD) {
1038 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1039 ++store_check_cachable_hist.no.too_many_open_fds;
1040 } else {
1041 ++store_check_cachable_hist.yes.Default;
1042 return 1;
1043 }
1044
1045 releaseRequest();
1046 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1047 return 0;
1048 }
1049
1050 void
1051 storeCheckCachableStats(StoreEntry *sentry)
1052 {
1053 storeAppendPrintf(sentry, "Category\t Count\n");
1054
1055 #if CACHE_ALL_METHODS
1056
1057 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1058 store_check_cachable_hist.no.non_get);
1059 #endif
1060
1061 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1062 store_check_cachable_hist.no.not_entry_cachable);
1063 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1064 store_check_cachable_hist.no.wrong_content_length);
1065 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1066 store_check_cachable_hist.no.negative_cached);
1067 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1068 store_check_cachable_hist.no.too_big);
1069 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1070 store_check_cachable_hist.no.too_small);
1071 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1072 store_check_cachable_hist.no.private_key);
1073 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1074 store_check_cachable_hist.no.too_many_open_files);
1075 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1076 store_check_cachable_hist.no.too_many_open_fds);
1077 storeAppendPrintf(sentry, "yes.default\t%d\n",
1078 store_check_cachable_hist.yes.Default);
1079 }
1080
1081 void
1082 StoreEntry::complete()
1083 {
1084 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1085
1086 if (store_status != STORE_PENDING) {
1087 /*
1088 * if we're not STORE_PENDING, then probably we got aborted
1089 * and there should be NO clients on this entry
1090 */
1091 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1092 assert(mem_obj->nclients == 0);
1093 return;
1094 }
1095
1096 /* This is suspect: mem_obj offsets include the headers. Do we adjust for that
1097 * in uses of object_sz?
1098 */
1099 mem_obj->object_sz = mem_obj->endOffset();
1100
1101 store_status = STORE_OK;
1102
1103 assert(mem_status == NOT_IN_MEMORY);
1104
1105 if (!validLength()) {
1106 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1107 releaseRequest();
1108 }
1109
1110 #if USE_CACHE_DIGESTS
1111 if (mem_obj->request)
1112 mem_obj->request->hier.store_complete_stop = current_time;
1113
1114 #endif
1115 /*
1116 * We used to call invokeHandlers, then storeSwapOut. However,
1117 * Madhukar Reddy <myreddy@persistence.com> reported that
1118 * responses without content length would sometimes get released
1119 * in client_side, thinking that the response is incomplete.
1120 */
1121 invokeHandlers();
1122 }
1123
1124 /*
1125 * Someone wants to abort this transfer. Set the reason in the
1126 * request structure, call the server-side callback and mark the
1127 * entry for releasing
1128 */
1129 void
1130 StoreEntry::abort()
1131 {
1132 ++statCounter.aborted_requests;
1133 assert(store_status == STORE_PENDING);
1134 assert(mem_obj != NULL);
1135 debugs(20, 6, "storeAbort: " << getMD5Text());
1136
1137 lock(); /* lock while aborting */
1138 negativeCache();
1139
1140 releaseRequest();
1141
1142 EBIT_SET(flags, ENTRY_ABORTED);
1143
1144 setMemStatus(NOT_IN_MEMORY);
1145
1146 store_status = STORE_OK;
1147
1148 /* Notify the server side */
1149
1150 /*
1151 * DPW 2007-05-07
1152 * Should we check abort.data for validity?
1153 */
1154 if (mem_obj->abort.callback) {
1155 if (!cbdataReferenceValid(mem_obj->abort.data))
1156 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
1157 eventAdd("mem_obj->abort.callback",
1158 mem_obj->abort.callback,
1159 mem_obj->abort.data,
1160 0.0,
1161 true);
1162 unregisterAbort();
1163 }
1164
1165 /* XXX Should we reverse these two, so that there is no
1166 * unneeded disk swapping triggered?
1167 */
1168 /* Notify the client side */
1169 invokeHandlers();
1170
1171 // abort swap out, invalidating what was created so far (release follows)
1172 swapOutFileClose(StoreIOState::writerGone);
1173
1174 unlock(); /* unlock */
1175 }
1176
1177 /**
1178 * Clear Memory storage to accommodate the given object len
1179 */
1180 void
1181 storeGetMemSpace(int size)
1182 {
1183 PROF_start(storeGetMemSpace);
1184 StoreEntry *e = NULL;
1185 int released = 0;
1186 static time_t last_check = 0;
1187 size_t pages_needed;
1188 RemovalPurgeWalker *walker;
1189
1190 if (squid_curtime == last_check) {
1191 PROF_stop(storeGetMemSpace);
1192 return;
1193 }
1194
1195 last_check = squid_curtime;
1196
1197 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
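/* ceiling division: e.g., assuming the usual 4 KB SM_PAGE_SIZE, a 10000-byte
 * write needs (10000 + 4095) / 4096 = 3 pages. */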
1198
1199 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1200 PROF_stop(storeGetMemSpace);
1201 return;
1202 }
1203
1204 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1205 " pages");
1206
1207 /* XXX what to set as max_scan here? */
1208 walker = mem_policy->PurgeInit(mem_policy, 100000);
1209
1210 while ((e = walker->Next(walker))) {
1211 e->purgeMem();
1212 ++released;
1213
1214 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1215 break;
1216 }
1217
1218 walker->Done(walker);
1219 debugs(20, 3, "storeGetMemSpace stats:");
1220 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1221 debugs(20, 3, " " << std::setw(6) << released << " were released");
1222 PROF_stop(storeGetMemSpace);
1223 }
1224
1225 /* thunk through to Store::Root().maintain(). Note that this would be better still
1226 * if registered against the root store itself, but that requires more complex
1227 * update logic - bigger fish to fry first. Long term, each store will
1228 * self-register when it becomes active.
1229 */
1230 void
1231 Store::Maintain(void *notused)
1232 {
1233 Store::Root().maintain();
1234
1235 /* Reregister a maintain event .. */
1236 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1237
1238 }
1239
1240 /* The maximum objects to scan for maintain storage space */
1241 #define MAINTAIN_MAX_SCAN 1024
1242 #define MAINTAIN_MAX_REMOVE 64
1243
1244 /*
1245 * This routine is to be called by the main loop in main.cc.
1246 * It removes expired objects from only one bucket each time it is called.
1247 *
1248 * This should get called 1/s from main().
1249 */
1250 void
1251 StoreController::maintain()
1252 {
1253 static time_t last_warn_time = 0;
1254
1255 PROF_start(storeMaintainSwapSpace);
1256 swapDir->maintain();
1257
1258 /* this should be emitted by the oversize dir, not globally */
1259
1260 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1261 if (squid_curtime - last_warn_time > 10) {
1262 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1263 << Store::Root().currentSize() / 1024.0 << " KB > "
1264 << (Store::Root().maxSize() >> 10) << " KB");
1265 last_warn_time = squid_curtime;
1266 }
1267 }
1268
1269 PROF_stop(storeMaintainSwapSpace);
1270 }
1271
1272 /* release an object from a cache */
1273 void
1274 StoreEntry::release()
1275 {
1276 PROF_start(storeRelease);
1277 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1278 /* If, for any reason we can't discard this object because of an
1279 * outstanding request, mark it for pending release */
1280
1281 if (locked()) {
1282 expireNow();
1283 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1284 releaseRequest();
1285 PROF_stop(storeRelease);
1286 return;
1287 }
1288
1289 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1290 setPrivateKey();
1291
1292 if (mem_obj)
1293 destroyMemObject();
1294
1295 if (swap_filen > -1) {
1296 /*
1297 * Fake a call to StoreEntry->lock(). When rebuilding is done,
1298 * we'll just call StoreEntry->unlock() on these.
1299 */
1300 ++lock_count;
1301 setReleaseFlag();
1302 LateReleaseStack.push_back(this);
1303 } else {
1304 destroyStoreEntry(static_cast<hash_link *>(this));
1305 // "this" is no longer valid
1306 }
1307
1308 PROF_stop(storeRelease);
1309 return;
1310 }
1311
1312 storeLog(STORE_LOG_RELEASE, this);
1313
1314 if (swap_filen > -1) {
1315 // log before unlink() below clears swap_filen
1316 if (!EBIT_TEST(flags, KEY_PRIVATE))
1317 storeDirSwapLog(this, SWAP_LOG_DEL);
1318
1319 unlink();
1320 }
1321
1322 setMemStatus(NOT_IN_MEMORY);
1323 destroyStoreEntry(static_cast<hash_link *>(this));
1324 PROF_stop(storeRelease);
1325 }
1326
1327 static void
1328 storeLateRelease(void *unused)
1329 {
1330 StoreEntry *e;
1331 int i;
1332 static int n = 0;
1333
1334 if (StoreController::store_dirs_rebuilding) {
1335 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1336 return;
1337 }
1338
1339 for (i = 0; i < 10; ++i) {
1340 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1341
1342 if (e == NULL) {
1343 /* done! */
1344 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1345 return;
1346 }
1347
1348 e->unlock();
1349 ++n;
1350 }
1351
1352 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1353 }
1354
1355 /* return 1 if a store entry is locked */
1356 int
1357 StoreEntry::locked() const
1358 {
1359 if (lock_count)
1360 return 1;
1361
1362 if (swap_status == SWAPOUT_WRITING)
1363 return 1;
1364
1365 if (store_status == STORE_PENDING)
1366 return 1;
1367
1368 /*
1369 * SPECIAL, PUBLIC entries should be "locked"
1370 */
1371 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1372 if (!EBIT_TEST(flags, KEY_PRIVATE))
1373 return 1;
1374
1375 return 0;
1376 }
1377
1378 bool
1379 StoreEntry::validLength() const
1380 {
1381 int64_t diff;
1382 const HttpReply *reply;
1383 assert(mem_obj != NULL);
1384 reply = getReply();
1385 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1386 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1387 objectLen());
1388 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1389 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1390
1391 if (reply->content_length < 0) {
1392 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1393 return 1;
1394 }
1395
1396 if (reply->hdr_sz == 0) {
1397 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1398 return 1;
1399 }
1400
1401 if (mem_obj->method == Http::METHOD_HEAD) {
1402 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1403 return 1;
1404 }
1405
1406 if (reply->sline.status == HTTP_NOT_MODIFIED)
1407 return 1;
1408
1409 if (reply->sline.status == HTTP_NO_CONTENT)
1410 return 1;
1411
1412 diff = reply->hdr_sz + reply->content_length - objectLen();
1413
1414 if (diff == 0)
1415 return 1;
1416
1417 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1418
1419 return 0;
1420 }
1421
1422 static void
1423 storeRegisterWithCacheManager(void)
1424 {
1425 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1426 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1427 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1428 storeCheckCachableStats, 0, 1);
1429 }
1430
1431 void
1432 storeInit(void)
1433 {
1434 storeKeyInit();
1435 mem_policy = createRemovalPolicy(Config.memPolicy);
1436 storeDigestInit();
1437 storeLogOpen();
1438 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1439 Store::Root().init();
1440 storeRebuildStart();
1441
1442 storeRegisterWithCacheManager();
1443 }
1444
1445 void
1446 storeConfigure(void)
1447 {
1448 store_swap_high = (long) (((float) Store::Root().maxSize() *
1449 (float) Config.Swap.highWaterMark) / (float) 100);
1450 store_swap_low = (long) (((float) Store::Root().maxSize() *
1451 (float) Config.Swap.lowWaterMark) / (float) 100);
1452 store_pages_max = Config.memMaxSize / sizeof(mem_node);
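/* Illustrative numbers (assumptions, not configuration defaults): with
 * cache_swap_high 95 the high watermark is 95% of the store's maximum size;
 * with cache_mem 256 MB and mem_node pages of roughly 4 KB plus bookkeeping,
 * store_pages_max works out to a little under 65536. */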
1453 }
1454
1455 bool
1456 StoreEntry::memoryCachable() const
1457 {
1458 if (mem_obj == NULL)
1459 return 0;
1460
1461 if (mem_obj->data_hdr.size() == 0)
1462 return 0;
1463
1464 if (mem_obj->inmem_lo != 0)
1465 return 0;
1466
1467 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1468 return 0;
1469
1470 return 1;
1471 }
1472
1473 int
1474 StoreEntry::checkNegativeHit() const
1475 {
1476 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1477 return 0;
1478
1479 if (expires <= squid_curtime)
1480 return 0;
1481
1482 if (store_status != STORE_OK)
1483 return 0;
1484
1485 return 1;
1486 }
1487
1488 /**
1489 * Set object for negative caching.
1490 * Preserves any expiry information given by the server.
1491 * In the absence of proper expiry info it is set to expire immediately;
1492 * with HTTP violations enabled, the configured negative TTL is observed instead.
1493 */
1494 void
1495 StoreEntry::negativeCache()
1496 {
1497 // XXX: should make the default for expires 0 instead of -1
1498 // so we can distinguish "Expires: -1" from nothing.
1499 if (expires <= 0)
1500 #if USE_HTTP_VIOLATIONS
1501 expires = squid_curtime + Config.negativeTtl;
1502 #else
1503 expires = squid_curtime;
1504 #endif
1505 EBIT_SET(flags, ENTRY_NEGCACHED);
1506 }
1507
1508 void
1509 storeFreeMemory(void)
1510 {
1511 Store::Root(NULL);
1512 #if USE_CACHE_DIGESTS
1513
1514 if (store_digest)
1515 cacheDigestDestroy(store_digest);
1516
1517 #endif
1518
1519 store_digest = NULL;
1520 }
1521
1522 int
1523 expiresMoreThan(time_t expires, time_t when)
1524 {
1525 if (expires < 0) /* No Expires given */
1526 return 1;
1527
1528 return (expires > (squid_curtime + when));
1529 }
1530
1531 int
1532 StoreEntry::validToSend() const
1533 {
1534 if (EBIT_TEST(flags, RELEASE_REQUEST))
1535 return 0;
1536
1537 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1538 if (expires <= squid_curtime)
1539 return 0;
1540
1541 if (EBIT_TEST(flags, ENTRY_ABORTED))
1542 return 0;
1543
1544 return 1;
1545 }
1546
1547 void
1548 StoreEntry::timestampsSet()
1549 {
1550 const HttpReply *reply = getReply();
1551 time_t served_date = reply->date;
1552 int age = reply->header.getInt(HDR_AGE);
1553 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1554 /* make sure that 0 <= served_date <= squid_curtime */
1555
1556 if (served_date < 0 || served_date > squid_curtime)
1557 served_date = squid_curtime;
1558
1559 /* Bug 1791:
1560 * If the returned Date: is more than 24 hours older than
1561 * the squid_curtime, then one of us needs to use NTP to set our
1562 * clock. We'll pretend that our clock is right.
1563 */
1564 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1565 served_date = squid_curtime;
1566
1567 /*
1568 * Compensate with Age header if origin server clock is ahead
1569 * of us and there is a cache in between us and the origin
1570 * server. But DONT compensate if the age value is larger than
1571 * squid_curtime because it results in a negative served_date.
1572 */
1573 if (age > squid_curtime - served_date)
1574 if (squid_curtime > age)
1575 served_date = squid_curtime - age;
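/* e.g. (illustrative): a reply dated "now" but carrying "Age: 300" from an
 * intermediate cache has age (300) greater than squid_curtime - served_date
 * (0), so served_date is pushed back to squid_curtime - 300. */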
1576
1577 // compensate for Squid-to-server and server-to-Squid delays
1578 if (mem_obj && mem_obj->request) {
1579 const time_t request_sent =
1580 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1581 if (0 < request_sent && request_sent < squid_curtime)
1582 served_date -= (squid_curtime - request_sent);
1583 }
1584
1585 if (reply->expires > 0 && reply->date > -1)
1586 expires = served_date + (reply->expires - reply->date);
1587 else
1588 expires = reply->expires;
1589
1590 lastmod = reply->last_modified;
1591
1592 timestamp = served_date;
1593 }
1594
1595 void
1596 StoreEntry::registerAbort(STABH * cb, void *data)
1597 {
1598 assert(mem_obj);
1599 assert(mem_obj->abort.callback == NULL);
1600 mem_obj->abort.callback = cb;
1601 mem_obj->abort.data = cbdataReference(data);
1602 }
1603
1604 void
1605 StoreEntry::unregisterAbort()
1606 {
1607 assert(mem_obj);
1608 if (mem_obj->abort.callback) {
1609 mem_obj->abort.callback = NULL;
1610 cbdataReferenceDone(mem_obj->abort.data);
1611 }
1612 }
1613
1614 void
1615 StoreEntry::dump(int l) const
1616 {
1617 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1618 debugs(20, l, "StoreEntry->next: " << next);
1619 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1620 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1621 debugs(20, l, "StoreEntry->lastref: " << lastref);
1622 debugs(20, l, "StoreEntry->expires: " << expires);
1623 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1624 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1625 debugs(20, l, "StoreEntry->refcount: " << refcount);
1626 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1627 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1628 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1629 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1630 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1631 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1632 debugs(20, l, "StoreEntry->store_status: " << store_status);
1633 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1634 }
1635
1636 /*
1637 * NOTE, this function assumes only two mem states
1638 */
1639 void
1640 StoreEntry::setMemStatus(mem_status_t new_status)
1641 {
1642 if (new_status == mem_status)
1643 return;
1644
1645 // are we using a shared memory cache?
1646 if (Config.memShared && IamWorkerProcess()) {
1647 // enumerate calling cases if shared memory is enabled
1648 assert(new_status != IN_MEMORY || EBIT_TEST(flags, ENTRY_SPECIAL));
1649 // This method was designed to update replacement policy, not to
1650 // actually purge something from the memory cache (TODO: rename?).
1651 // Shared memory cache does not have a policy that needs updates.
1652 mem_status = new_status;
1653 return;
1654 }
1655
1656 assert(mem_obj != NULL);
1657
1658 if (new_status == IN_MEMORY) {
1659 assert(mem_obj->inmem_lo == 0);
1660
1661 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1662 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1663 } else {
1664 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1665 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1666 }
1667
1668 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1669 } else {
1670 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1671 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1672 } else {
1673 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1674 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1675 }
1676
1677 --hot_obj_count;
1678 }
1679
1680 mem_status = new_status;
1681 }
1682
1683 const char *
1684 StoreEntry::url() const
1685 {
1686 if (this == NULL)
1687 return "[null_entry]";
1688 else if (mem_obj == NULL)
1689 return "[null_mem_obj]";
1690 else
1691 return mem_obj->url;
1692 }
1693
1694 void
1695 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1696 {
1697 debugs(20, 3, "A mem_obj create attempted using : " << aUrl);
1698
1699 if (mem_obj)
1700 return;
1701
1702 if (hidden_mem_obj) {
1703 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1704 mem_obj = hidden_mem_obj;
1705 hidden_mem_obj = NULL;
1706 mem_obj->resetUrls(aUrl, aLogUrl);
1707 return;
1708 }
1709
1710 mem_obj = new MemObject(aUrl, aLogUrl);
1711 }
1712
1713 /* this just sets DELAY_SENDING */
1714 void
1715 StoreEntry::buffer()
1716 {
1717 EBIT_SET(flags, DELAY_SENDING);
1718 }
1719
1720 /* this just clears DELAY_SENDING and Invokes the handlers */
1721 void
1722 StoreEntry::flush()
1723 {
1724 if (EBIT_TEST(flags, DELAY_SENDING)) {
1725 EBIT_CLR(flags, DELAY_SENDING);
1726 invokeHandlers();
1727 }
1728 }
1729
1730 int64_t
1731 StoreEntry::objectLen() const
1732 {
1733 assert(mem_obj != NULL);
1734 return mem_obj->object_sz;
1735 }
1736
1737 int64_t
1738 StoreEntry::contentLen() const
1739 {
1740 assert(mem_obj != NULL);
1741 assert(getReply() != NULL);
1742 return objectLen() - getReply()->hdr_sz;
1743 }
1744
1745 HttpReply const *
1746 StoreEntry::getReply () const
1747 {
1748 if (NULL == mem_obj)
1749 return NULL;
1750
1751 return mem_obj->getReply();
1752 }
1753
1754 void
1755 StoreEntry::reset()
1756 {
1757 assert (mem_obj);
1758 debugs(20, 3, "StoreEntry::reset: " << url());
1759 mem_obj->reset();
1760 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1761 rep->reset();
1762 expires = lastmod = timestamp = -1;
1763 }
1764
1765 /*
1766 * storeFsInit
1767 *
1768 * This routine calls the SETUP routine for each fs type.
1769 * I don't know where the best place for this is, and I'm not going to shuffle
1770 * around large chunks of code right now (that can be done once it's working.)
1771 */
1772 void
1773 storeFsInit(void)
1774 {
1775 storeReplSetup();
1776 }
1777
1778 /*
1779 * called to add another store removal policy module
1780 */
1781 void
1782 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1783 {
1784 int i;
1785
1786 /* find the number of currently known repl types */
1787 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1788 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1789 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1790 return;
1791 }
1792 }
1793
1794 /* add the new type */
1795 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1796
1797 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1798
1799 storerepl_list[i].typestr = type;
1800
1801 storerepl_list[i].create = create;
1802 }
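/*
 * Illustrative registration (a sketch of how the bundled policies hook in;
 * the exact function names live in the repl/ modules): a policy's setup code
 * calls something like
 *
 *   storeReplAdd("lru", createRemovalPolicy_lru);
 *
 * during storeFsInit()/storeReplSetup(), after which squid.conf can select
 * it via cache_replacement_policy / memory_replacement_policy.
 */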
1803
1804 /*
1805 * Create a removal policy instance
1806 */
1807 RemovalPolicy *
1808 createRemovalPolicy(RemovalPolicySettings * settings)
1809 {
1810 storerepl_entry_t *r;
1811
1812 for (r = storerepl_list; r && r->typestr; ++r) {
1813 if (strcmp(r->typestr, settings->type) == 0)
1814 return r->create(settings->args);
1815 }
1816
1817 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1818 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1819 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1820 fatalf("ERROR: Unknown policy %s\n", settings->type);
1821 return NULL; /* NOTREACHED */
1822 }
1823
1824 #if 0
1825 void
1826 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1827 {
1828 if (e->swap_file_number == filn)
1829 return;
1830
1831 if (filn < 0) {
1832 assert(-1 == filn);
1833 storeDirMapBitReset(e->swap_file_number);
1834 storeDirLRUDelete(e);
1835 e->swap_file_number = -1;
1836 } else {
1837 assert(-1 == e->swap_file_number);
1838 storeDirMapBitSet(e->swap_file_number = filn);
1839 storeDirLRUAdd(e);
1840 }
1841 }
1842
1843 #endif
1844
1845 /*
1846 * Replace a store entry with
1847 * a new reply. This eats the reply.
1848 */
1849 void
1850 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1851 {
1852 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1853
1854 if (!mem_obj) {
1855 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1856 return;
1857 }
1858
1859 mem_obj->replaceHttpReply(rep);
1860
1861 if (andStartWriting)
1862 startWriting();
1863 }
1864
1865 void
1866 StoreEntry::startWriting()
1867 {
1868 Packer p;
1869
1870 /* TODO: when we store headers separately remove the header portion */
1871 /* TODO: mark the length of the headers ? */
1872 /* We ONLY want the headers */
1873 packerToStoreInit(&p, this);
1874
1875 assert (isEmpty());
1876 assert(mem_obj);
1877
1878 const HttpReply *rep = getReply();
1879 assert(rep);
1880
1881 rep->packHeadersInto(&p);
1882 mem_obj->markEndOfReplyHeaders();
1883
1884 rep->body.packInto(&p);
1885
1886 packerClean(&p);
1887 }
1888
1889 char const *
1890 StoreEntry::getSerialisedMetaData()
1891 {
1892 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1893 int swap_hdr_sz;
1894 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1895 storeSwapTLVFree(tlv_list);
1896 assert (swap_hdr_sz >= 0);
1897 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1898 return result;
1899 }
1900
1901 void
1902 StoreEntry::trimMemory(const bool preserveSwappable)
1903 {
1904 /*
1905 * DPW 2007-05-09
1906 * Bug #1943. We must not let go of any data for IN_MEMORY
1907 * objects. We have to wait until the mem_status changes.
1908 */
1909 if (mem_status == IN_MEMORY)
1910 return;
1911
1912 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1913 return; // cannot trim because we do not load them again
1914
1915 if (!preserveSwappable) {
1916 if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
1917 /* Nothing to do */
1918 return;
1919 }
1920 /*
1921 * It's not swappable, and we're about to delete a chunk,
1922 * so we must make it PRIVATE. This is tricky/ugly because
1923 * for the most part, we treat swappable == cachable here.
1924 */
1925 releaseRequest();
1926 mem_obj->trimUnSwappable ();
1927 } else {
1928 mem_obj->trimSwappable ();
1929 }
1930 }
1931
1932 bool
1933 StoreEntry::modifiedSince(HttpRequest * request) const
1934 {
1935 int object_length;
1936 time_t mod_time = lastmod;
1937
1938 if (mod_time < 0)
1939 mod_time = timestamp;
1940
1941 debugs(88, 3, "modifiedSince: '" << url() << "'");
1942
1943 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1944
1945 if (mod_time < 0)
1946 return true;
1947
1948 /* Find size of the object */
1949 object_length = getReply()->content_length;
1950
1951 if (object_length < 0)
1952 object_length = contentLen();
1953
1954 if (mod_time > request->ims) {
1955 debugs(88, 3, "--> YES: entry newer than client");
1956 return true;
1957 } else if (mod_time < request->ims) {
1958 debugs(88, 3, "--> NO: entry older than client");
1959 return false;
1960 } else if (request->imslen < 0) {
1961 debugs(88, 3, "--> NO: same LMT, no client length");
1962 return false;
1963 } else if (request->imslen == object_length) {
1964 debugs(88, 3, "--> NO: same LMT, same length");
1965 return false;
1966 } else {
1967 debugs(88, 3, "--> YES: same LMT, different length");
1968 return true;
1969 }
1970 }
1971
1972 bool
1973 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1974 {
1975 const String reqETags = request.header.getList(HDR_IF_MATCH);
1976 return hasOneOfEtags(reqETags, false);
1977 }
1978
1979 bool
1980 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1981 {
1982 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1983 // weak comparison is allowed only for HEAD or full-body GET requests
1984 const bool allowWeakMatch = !request.flags.isRanged &&
1985 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
1986 return hasOneOfEtags(reqETags, allowWeakMatch);
1987 }
1988
1989 /// whether at least one of the request ETags matches entity ETag
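/// e.g. (illustrative): with a stored ETag of "abc" and a request header
/// If-None-Match: W/"abc", "xyz" -- the weak form W/"abc" matches only when
/// allowWeakMatch is true; "xyz" never matches; and "*" always matches.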
1990 bool
1991 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1992 {
1993 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1994 if (!repETag.str)
1995 return strListIsMember(&reqETags, "*", ',');
1996
1997 bool matched = false;
1998 const char *pos = NULL;
1999 const char *item;
2000 int ilen;
2001 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
2002 if (!strncmp(item, "*", ilen))
2003 matched = true;
2004 else {
2005 String str;
2006 str.append(item, ilen);
2007 ETag reqETag;
2008 if (etagParseInit(&reqETag, str.termedBuf())) {
2009 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
2010 etagIsStrongEqual(repETag, reqETag);
2011 }
2012 }
2013 }
2014 return matched;
2015 }
2016
2017 SwapDir::Pointer
2018 StoreEntry::store() const
2019 {
2020 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2021 return INDEXSD(swap_dirn);
2022 }
2023
2024 void
2025 StoreEntry::unlink()
2026 {
2027 store()->unlink(*this); // implies disconnect()
2028 swap_filen = -1;
2029 swap_dirn = -1;
2030 swap_status = SWAPOUT_NONE;
2031 }
2032
2033 /*
2034 * return true if the entry is in a state where
2035 * it can accept more data (i.e., via the write() method)
2036 */
2037 bool
2038 StoreEntry::isAccepting() const
2039 {
2040 if (STORE_PENDING != store_status)
2041 return false;
2042
2043 if (EBIT_TEST(flags, ENTRY_ABORTED))
2044 return false;
2045
2046 return true;
2047 }
2048
2049 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2050 {
2051 return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
2052 e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
2053 e.swap_status;
2054 }
2055
2056 /* NullStoreEntry */
2057
2058 NullStoreEntry NullStoreEntry::_instance;
2059
2060 NullStoreEntry *
2061 NullStoreEntry::getInstance()
2062 {
2063 return &_instance;
2064 }
2065
2066 char const *
2067 NullStoreEntry::getMD5Text() const
2068 {
2069 return "N/A";
2070 }
2071
2072 void
2073 NullStoreEntry::operator delete(void*)
2074 {
2075 fatal ("Attempt to delete NullStoreEntry\n");
2076 }
2077
2078 char const *
2079 NullStoreEntry::getSerialisedMetaData()
2080 {
2081 return NULL;
2082 }
2083
2084 #if !_USE_INLINE_
2085 #include "Store.cci"
2086 #endif