1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid-old.h"
37 #include "CacheManager.h"
38 #include "comm/Connection.h"
39 #include "ETag.h"
40 #include "event.h"
41 #include "fde.h"
42 #include "Store.h"
43 #include "mgr/Registration.h"
44 #include "StoreClient.h"
45 #include "stmem.h"
46 #include "HttpReply.h"
47 #include "HttpRequest.h"
48 #include "MemObject.h"
49 #include "mem_node.h"
50 #include "StatCounters.h"
51 #include "StoreMeta.h"
52 #include "SwapDir.h"
53 #include "StoreIOState.h"
54 #if USE_DELAY_POOLS
55 #include "DelayPools.h"
56 #endif
57 #include "Stack.h"
58 #include "SquidTime.h"
59 #include "swap_log_op.h"
60 #include "mgr/StoreIoAction.h"
61
62 static STMCB storeWriteComplete;
63
64 #define REBUILD_TIMESTAMP_DELTA_MAX 2
65
66 #define STORE_IN_MEM_BUCKETS (229)
67
68
69 /** \todo Convert these string constants to generated enum string-arrays */
70
71 const char *memStatusStr[] = {
72 "NOT_IN_MEMORY",
73 "IN_MEMORY"
74 };
75
76 const char *pingStatusStr[] = {
77 "PING_NONE",
78 "PING_WAITING",
79 "PING_DONE"
80 };
81
82 const char *storeStatusStr[] = {
83 "STORE_OK",
84 "STORE_PENDING"
85 };
86
87 const char *swapStatusStr[] = {
88 "SWAPOUT_NONE",
89 "SWAPOUT_WRITING",
90 "SWAPOUT_DONE"
91 };
92
93
94 /*
95 * This defines a repl type
96 */
97
98 typedef struct _storerepl_entry storerepl_entry_t;
99
100 struct _storerepl_entry {
101 const char *typestr;
102 REMOVALPOLICYCREATE *create;
103 };
104
105 static storerepl_entry_t *storerepl_list = NULL;
106
107
108 /*
109 * local function prototypes
110 */
111 static int getKeyCounter(void);
112 static OBJH storeCheckCachableStats;
113 static EVH storeLateRelease;
114
115 /*
116 * local variables
117 */
118 static Stack<StoreEntry*> LateReleaseStack;
119 MemAllocator *StoreEntry::pool = NULL;
120
121 StorePointer Store::CurrentRoot = NULL;
122
123 void
124 Store::Root(Store * aRoot)
125 {
126 CurrentRoot = aRoot;
127 }
128
129 void
130 Store::Root(StorePointer aRoot)
131 {
132 Root(aRoot.getRaw());
133 }
134
135 void
136 Store::Stats(StoreEntry * output)
137 {
138 assert (output);
139 Root().stat(*output);
140 }
141
142 void
143 Store::create()
144 {}
145
146 void
147 Store::diskFull()
148 {}
149
150 void
151 Store::sync()
152 {}
153
154 void
155 Store::unlink (StoreEntry &anEntry)
156 {
157 fatal("Store::unlink on invalid Store\n");
158 }
159
160 void *
161 StoreEntry::operator new (size_t bytecount)
162 {
163 assert (bytecount == sizeof (StoreEntry));
164
165 if (!pool) {
166 pool = memPoolCreate ("StoreEntry", bytecount);
167 pool->setChunkSize(2048 * 1024);
168 }
169
170 return pool->alloc();
171 }
172
173 void
174 StoreEntry::operator delete (void *address)
175 {
176 pool->freeOne(address);
177 }
178
179 void
180 StoreEntry::makePublic()
181 {
182 /* This object can be cached for a long time */
183
184 if (EBIT_TEST(flags, ENTRY_CACHABLE))
185 setPublicKey();
186 }
187
188 void
189 StoreEntry::makePrivate()
190 {
191 /* This object should never be cached at all */
192 expireNow();
193 releaseRequest(); /* delete object when not used */
194 /* releaseRequest clears ENTRY_CACHABLE flag */
195 }
196
197 void
198 StoreEntry::cacheNegatively()
199 {
200 /* This object may be negatively cached */
201 negativeCache();
202
203 if (EBIT_TEST(flags, ENTRY_CACHABLE))
204 setPublicKey();
205 }
206
207 size_t
208 StoreEntry::inUseCount()
209 {
210 if (!pool)
211 return 0;
212 return pool->getInUseCount();
213 }
214
215 const char *
216 StoreEntry::getMD5Text() const
217 {
218 return storeKeyText((const cache_key *)key);
219 }
220
221 #include "comm.h"
222
223 void
224 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
225 {
226 StoreEntry *anEntry = (StoreEntry *)theContext;
227 anEntry->delayAwareRead(aRead.conn,
228 aRead.buf,
229 aRead.len,
230 aRead.callback);
231 }
232
233 void
234 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
235 {
236 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
237 /* sketch: readdeferer* = getdeferer.
238 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
239 */
240
241 if (amountToRead == 0) {
242 assert (mem_obj);
243 /* read ahead limit */
244 /* Perhaps these two calls should both live in MemObject */
245 #if USE_DELAY_POOLS
246 if (!mem_obj->readAheadPolicyCanRead()) {
247 #endif
248 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
249 return;
250 #if USE_DELAY_POOLS
251 }
252
253 /* delay id limit */
254 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
255 return;
256
257 #endif
258
259 }
260
261 if (fd_table[conn->fd].closing()) {
262 // Readers must have closing callbacks if they want to be notified. No
263 // readers appeared to care around 2009/12/14 as they skipped reading
264 // for other reasons. Closing may already be true at the delayAwareRead
265 // call time or may happen while we wait after delayRead() above.
266 debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
267 callback);
268 return; // the read callback will never be called
269 }
270
271 comm_read(conn, buf, amountToRead, callback);
272 }
273
274 size_t
275 StoreEntry::bytesWanted (Range<size_t> const aRange) const
276 {
277 assert (aRange.size());
278
279 if (mem_obj == NULL)
280 return aRange.end - 1;
281
282 #if URL_CHECKSUM_DEBUG
283
284 mem_obj->checkUrlChecksum();
285
286 #endif
287
288 /* Always read *something* here - we haven't got the header yet */
289 if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT))
290 return aRange.end - 1;
291
292 if (!mem_obj->readAheadPolicyCanRead())
293 return 0;
294
295 return mem_obj->mostBytesWanted(aRange.end - 1);
296 }
297
298 bool
299 StoreEntry::checkDeferRead(int fd) const
300 {
301 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
302 }
303
304 void
305 StoreEntry::setNoDelay (bool const newValue)
306 {
307 if (mem_obj)
308 mem_obj->setNoDelay(newValue);
309 }
310
311 store_client_t
312 StoreEntry::storeClientType() const
313 {
314 /* The needed offset isn't in memory
315 * XXX TODO: this is wrong for range requests
316 * as the needed offset may *not* be 0, AND
317 * offset 0 in the memory object is the HTTP headers.
318 */
319
320 if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
321 // clients of an object cached in shared memory are memory clients
322 return STORE_MEM_CLIENT;
323 }
324
325 assert(mem_obj);
326
327 if (mem_obj->inmem_lo)
328 return STORE_DISK_CLIENT;
329
330 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
331 /* I don't think we should be adding clients to aborted entries */
332 debugs(20, 1, "storeClientType: adding to ENTRY_ABORTED entry");
333 return STORE_MEM_CLIENT;
334 }
335
336 if (store_status == STORE_OK) {
337 /* the object has completed. */
338
339 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
340 if (swap_status == SWAPOUT_DONE) {
341 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
342 if (mem_obj->endOffset() == mem_obj->object_sz) {
343 /* hot object fully swapped in */
344 return STORE_MEM_CLIENT;
345 }
346 } else {
347 /* Memory-only, or currently being swapped out */
348 return STORE_MEM_CLIENT;
349 }
350 }
351 return STORE_DISK_CLIENT;
352 }
353
354 /* here and past, entry is STORE_PENDING */
355 /*
356 * If this is the first client, let it be the mem client
357 */
358 if (mem_obj->nclients == 1)
359 return STORE_MEM_CLIENT;
360
361 /*
362 * If there is no disk file to open yet, we must make this a
363 * mem client. If we can't open the swapin file before writing
364 * to the client, there is no guarantee that we will be able
365 * to open it later when we really need it.
366 */
367 if (swap_status == SWAPOUT_NONE)
368 return STORE_MEM_CLIENT;
369
370 /*
371 * otherwise, make subsequent clients read from disk so they
372 * can not delay the first, and vice-versa.
373 */
374 return STORE_DISK_CLIENT;
375 }
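/*
 * Decision summary (descriptive note added for readability, not authoritative):
 * shared-memory hits, aborted entries, the first client of a pending entry,
 * pending entries with no swap file yet, and completed objects still fully
 * held in memory become STORE_MEM_CLIENT; everything else, notably objects
 * whose in-memory window no longer starts at offset 0, reads back from disk
 * as STORE_DISK_CLIENT.
 */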
376
377 StoreEntry::StoreEntry():
378 hidden_mem_obj(NULL),
379 swap_file_sz(0)
380 {
381 debugs(20, 3, HERE << "new StoreEntry " << this);
382 mem_obj = NULL;
383
384 expires = lastmod = lastref = timestamp = -1;
385
386 swap_status = SWAPOUT_NONE;
387 swap_filen = -1;
388 swap_dirn = -1;
389 }
390
391 StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
392 hidden_mem_obj(NULL),
393 swap_file_sz(0)
394 {
395 debugs(20, 3, HERE << "new StoreEntry " << this);
396 mem_obj = new MemObject(aUrl, aLogUrl);
397
398 expires = lastmod = lastref = timestamp = -1;
399
400 swap_status = SWAPOUT_NONE;
401 swap_filen = -1;
402 swap_dirn = -1;
403 }
404
405 StoreEntry::~StoreEntry()
406 {
407 if (swap_filen >= 0) {
408 SwapDir &sd = dynamic_cast<SwapDir&>(*store());
409 sd.disconnect(*this);
410 }
411 delete hidden_mem_obj;
412 }
413
414 #if USE_ADAPTATION
415 void
416 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
417 {
418 if (!deferredProducer)
419 deferredProducer = producer;
420 else
421 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
422 *deferredProducer << ", requested call: " << *producer);
423 }
424
425 void
426 StoreEntry::kickProducer()
427 {
428 if (deferredProducer != NULL) {
429 ScheduleCallHere(deferredProducer);
430 deferredProducer = NULL;
431 }
432 }
433 #endif
434
435 void
436 StoreEntry::destroyMemObject()
437 {
438 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
439 setMemStatus(NOT_IN_MEMORY);
440 MemObject *mem = mem_obj;
441 mem_obj = NULL;
442 delete mem;
443 delete hidden_mem_obj;
444 hidden_mem_obj = NULL;
445 }
446
447 void
448 StoreEntry::hideMemObject()
449 {
450 debugs(20, 3, HERE << "hiding " << mem_obj);
451 assert(mem_obj);
452 assert(!hidden_mem_obj);
453 hidden_mem_obj = mem_obj;
454 mem_obj = NULL;
455 }
456
457 void
458 destroyStoreEntry(void *data)
459 {
460 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
461 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
462 assert(e != NULL);
463
464 if (e == NullStoreEntry::getInstance())
465 return;
466
467 e->destroyMemObject();
468
469 e->hashDelete();
470
471 assert(e->key == NULL);
472
473 delete e;
474 }
475
476 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
477
478 void
479 StoreEntry::hashInsert(const cache_key * someKey)
480 {
481 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
482 key = storeKeyDup(someKey);
483 hash_join(store_table, this);
484 }
485
486 void
487 StoreEntry::hashDelete()
488 {
489 hash_remove_link(store_table, this);
490 storeKeyFree((const cache_key *)key);
491 key = NULL;
492 }
493
494 /* -------------------------------------------------------------------------- */
495
496
497 /* get rid of memory copy of the object */
498 void
499 StoreEntry::purgeMem()
500 {
501 if (mem_obj == NULL)
502 return;
503
504 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
505
506 destroyMemObject();
507
508 if (swap_status != SWAPOUT_DONE)
509 release();
510 }
511
512 /* RBC 20050104 this is wrong- memory ref counting
513 * is not at all equivalent to the store 'usage' concept
514 * which the replacement policies should be acting upon.
515 * specifically, object iteration within stores needs
516 * memory ref counting to prevent race conditions,
517 * but this should not influence store replacement.
518 */
519 void
520
521 StoreEntry::lock()
522 {
523 lock_count++;
524 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
525 lock_count );
526 lastref = squid_curtime;
527 Store::Root().reference(*this);
528 }
529
530 void
531 StoreEntry::setReleaseFlag()
532 {
533 if (EBIT_TEST(flags, RELEASE_REQUEST))
534 return;
535
536 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
537
538 EBIT_SET(flags, RELEASE_REQUEST);
539 }
540
541 void
542 StoreEntry::releaseRequest()
543 {
544 if (EBIT_TEST(flags, RELEASE_REQUEST))
545 return;
546
547 setReleaseFlag();
548
549 /*
550 * Clear cachable flag here because we might get called before
551 * anyone else even looks at the cachability flag. Also, this
552 * prevents httpMakePublic from really setting a public key.
553 */
554 EBIT_CLR(flags, ENTRY_CACHABLE);
555
556 setPrivateKey();
557 }
558
559 /* unlock object; return 0 if the object gets released after unlock,
560 * otherwise return the remaining lock_count */
561 int
562 StoreEntry::unlock()
563 {
564 lock_count--;
565 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
566
567 if (lock_count)
568 return (int) lock_count;
569
570 if (store_status == STORE_PENDING)
571 setReleaseFlag();
572
573 assert(storePendingNClients(this) == 0);
574
575 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
576 this->release();
577 return 0;
578 }
579
580 if (EBIT_TEST(flags, KEY_PRIVATE))
581 debugs(20, 1, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
582
583 Store::Root().handleIdleEntry(*this); // may delete us
584 return 0;
585 }
586
587 void
588 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
589 {
590 assert (aClient);
591 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
592
593 if (!result)
594 aClient->created (NullStoreEntry::getInstance());
595 else
596 aClient->created (result);
597 }
598
599 void
600 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
601 {
602 assert (aClient);
603 StoreEntry *result = storeGetPublicByRequest (request);
604
605 if (!result)
606 result = NullStoreEntry::getInstance();
607
608 aClient->created (result);
609 }
610
611 void
612 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
613 {
614 assert (aClient);
615 StoreEntry *result = storeGetPublic (uri, method);
616
617 if (!result)
618 result = NullStoreEntry::getInstance();
619
620 aClient->created (result);
621 }
622
623 StoreEntry *
624 storeGetPublic(const char *uri, const HttpRequestMethod& method)
625 {
626 return Store::Root().get(storeKeyPublic(uri, method));
627 }
628
629 StoreEntry *
630 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
631 {
632 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
633 }
634
635 StoreEntry *
636 storeGetPublicByRequest(HttpRequest * req)
637 {
638 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
639
640 if (e == NULL && req->method == METHOD_HEAD)
641 /* We can generate a HEAD reply from a cached GET object */
642 e = storeGetPublicByRequestMethod(req, METHOD_GET);
643
644 return e;
645 }
646
647 static int
648 getKeyCounter(void)
649 {
650 static int key_counter = 0;
651
652 if (++key_counter < 0)
653 key_counter = 1;
654
655 return key_counter;
656 }
657
658 /* RBC 20050104 AFAICT this should become simpler:
659 * rather than reinserting with a special key it should be marked
660 * as 'released' and then cleaned up when refcounting indicates.
661 * the StoreHashIndex could well implement its 'released' in the
662 * current manner.
663 * Also, clean log writing should skip over it.
664 * Otherwise, we need a 'remove from the index but not the store
665 * concept'.
666 */
667 void
668 StoreEntry::setPrivateKey()
669 {
670 const cache_key *newkey;
671
672 if (key && EBIT_TEST(flags, KEY_PRIVATE))
673 return; /* is already private */
674
675 if (key) {
676 if (swap_filen > -1)
677 storeDirSwapLog(this, SWAP_LOG_DEL);
678
679 hashDelete();
680 }
681
682 if (mem_obj != NULL) {
683 mem_obj->id = getKeyCounter();
684 newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
685 } else {
686 newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
687 }
688
689 assert(hash_lookup(store_table, newkey) == NULL);
690 EBIT_SET(flags, KEY_PRIVATE);
691 hashInsert(newkey);
692 }
693
694 void
695 StoreEntry::setPublicKey()
696 {
697 StoreEntry *e2 = NULL;
698 const cache_key *newkey;
699
700 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
701 return; /* is already public */
702
703 assert(mem_obj);
704
705 /*
706 * We can't make RELEASE_REQUEST objects public. Depending on
707 * when RELEASE_REQUEST gets set, we might not be swapping out
708 * the object. If we're not swapping out, then subsequent
709 * store clients won't be able to access object data which has
710 * been freed from memory.
711 *
712 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
713 * be set, and StoreEntry::setPublicKey() should not be called.
714 */
715 #if MORE_DEBUG_OUTPUT
716
717 if (EBIT_TEST(flags, RELEASE_REQUEST))
718 debugs(20, 1, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
719
720 #endif
721
722 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
723
724 if (mem_obj->request) {
725 HttpRequest *request = mem_obj->request;
726
727 if (!mem_obj->vary_headers) {
728 /* First handle the case where the object no longer varies */
729 safe_free(request->vary_headers);
730 } else {
731 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
732 /* Oops.. the variance has changed. Kill the base object
733 * to record the new variance key
734 */
735 safe_free(request->vary_headers); /* free old "bad" variance key */
736 StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);
737
738 if (pe)
739 pe->release();
740 }
741
742 /* Make sure the request knows the variance status */
743 if (!request->vary_headers) {
744 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
745
746 if (vary)
747 request->vary_headers = xstrdup(vary);
748 }
749 }
750
751 // TODO: storeGetPublic() calls below may create unlocked entries.
752 // We should add/use storeHas() API or lock/unlock those entries.
753 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
754 /* Create "vary" base object */
755 String vary;
756 StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
757 /* We are allowed to do this typecast */
758 HttpReply *rep = new HttpReply;
759 rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
760 vary = mem_obj->getReply()->header.getList(HDR_VARY);
761
762 if (vary.size()) {
763 /* Again, we own this structure layout */
764 rep->header.putStr(HDR_VARY, vary.termedBuf());
765 vary.clean();
766 }
767
768 #if X_ACCELERATOR_VARY
769 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
770
771 if (vary.defined()) {
772 /* Again, we own this structure layout */
773 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
774 vary.clean();
775 }
776
777 #endif
778 pe->replaceHttpReply(rep);
779
780 pe->timestampsSet();
781
782 pe->makePublic();
783
784 pe->complete();
785
786 pe->unlock();
787 }
788
789 newkey = storeKeyPublicByRequest(mem_obj->request);
790 } else
791 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
792
793 if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
794 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
795 e2->setPrivateKey();
796 e2->release();
797
798 if (mem_obj->request)
799 newkey = storeKeyPublicByRequest(mem_obj->request);
800 else
801 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
802 }
803
804 if (key)
805 hashDelete();
806
807 EBIT_CLR(flags, KEY_PRIVATE);
808
809 hashInsert(newkey);
810
811 if (swap_filen > -1)
812 storeDirSwapLog(this, SWAP_LOG_ADD);
813 }
814
815 StoreEntry *
816 storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
817 {
818 StoreEntry *e = NULL;
819 MemObject *mem = NULL;
820 debugs(20, 3, "storeCreateEntry: '" << url << "'");
821
822 e = new StoreEntry(url, log_url);
823 e->lock_count = 1; /* Note lock here w/o calling storeLock() */
824 mem = e->mem_obj;
825 mem->method = method;
826
827 if (neighbors_do_private_keys || !flags.hierarchical)
828 e->setPrivateKey();
829 else
830 e->setPublicKey();
831
832 if (flags.cachable) {
833 EBIT_SET(e->flags, ENTRY_CACHABLE);
834 EBIT_CLR(e->flags, RELEASE_REQUEST);
835 } else {
836 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
837 e->releaseRequest();
838 }
839
840 e->store_status = STORE_PENDING;
841 e->setMemStatus(NOT_IN_MEMORY);
842 e->refcount = 0;
843 e->lastref = squid_curtime;
844 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
845 e->ping_status = PING_NONE;
846 EBIT_SET(e->flags, ENTRY_VALIDATED);
847 return e;
848 }
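/*
 * Typical producer-side flow built on top of storeCreateEntry() (a sketch
 * only; variable names are illustrative, not taken from any one caller):
 *
 *   StoreEntry *e = storeCreateEntry(url, log_url, request->flags, request->method);
 *   e->replaceHttpReply(reply);      // install the response headers
 *   e->append(body_data, body_len);  // feed body bytes as they arrive
 *   e->complete();                   // mark the object as fully received
 *   e->unlock();                     // drop the implicit creation lock
 */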
849
850 /* Mark object as expired */
851 void
852 StoreEntry::expireNow()
853 {
854 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
855 expires = squid_curtime;
856 }
857
858 void
859 storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
860 {
861 PROF_start(storeWriteComplete);
862 StoreEntry *e = (StoreEntry *)data;
863
864 if (EBIT_TEST(e->flags, DELAY_SENDING)) {
865 PROF_stop(storeWriteComplete);
866 return;
867 }
868
869 e->invokeHandlers();
870 PROF_stop(storeWriteComplete);
871 }
872
873 void
874 StoreEntry::write (StoreIOBuffer writeBuffer)
875 {
876 assert(mem_obj != NULL);
877 /* This assert will change when we teach the store to update */
878 PROF_start(StoreEntry_write);
879 assert(store_status == STORE_PENDING);
880
881 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
882 PROF_stop(StoreEntry_write);
883 storeGetMemSpace(writeBuffer.length);
884 mem_obj->write (writeBuffer, storeWriteComplete, this);
885 }
886
887 /* Append incoming data from a primary server to an entry. */
888 void
889 StoreEntry::append(char const *buf, int len)
890 {
891 assert(mem_obj != NULL);
892 assert(len >= 0);
893 assert(store_status == STORE_PENDING);
894
895 StoreIOBuffer tempBuffer;
896 tempBuffer.data = (char *)buf;
897 tempBuffer.length = len;
898 /*
899 * XXX sigh, offset might be < 0 here, but it gets "corrected"
900 * later. This offset crap is such a mess.
901 */
902 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
903 write(tempBuffer);
904 }
905
906
907 void
908 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
909 {
910 va_list args;
911 va_start(args, fmt);
912
913 storeAppendVPrintf(e, fmt, args);
914 va_end(args);
915 }
916
917 /* used by storeAppendPrintf and Packer */
918 void
919 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
920 {
921 LOCAL_ARRAY(char, buf, 4096);
922 buf[0] = '\0';
923 vsnprintf(buf, 4096, fmt, vargs);
924 e->append(buf, strlen(buf));
925 }
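/*
 * Usage sketch for the printf-style helpers (illustrative only; field names
 * are made up, but the pattern mirrors storeCheckCachableStats() below).
 * Output may optionally be bracketed with buffer()/flush() to batch the
 * DELAY_SENDING handling:
 *
 *   storeAppendPrintf(sentry, "Category\t Count\n");
 *   storeAppendPrintf(sentry, "no.too_big\t%d\n", hist.no.too_big);
 */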
926
927 struct _store_check_cachable_hist {
928
929 struct {
930 int non_get;
931 int not_entry_cachable;
932 int wrong_content_length;
933 int negative_cached;
934 int too_big;
935 int too_small;
936 int private_key;
937 int too_many_open_files;
938 int too_many_open_fds;
939 } no;
940
941 struct {
942 int Default;
943 } yes;
944 } store_check_cachable_hist;
945
946 int
947 storeTooManyDiskFilesOpen(void)
948 {
949 if (Config.max_open_disk_fds == 0)
950 return 0;
951
952 if (store_open_disk_fd > Config.max_open_disk_fds)
953 return 1;
954
955 return 0;
956 }
957
958 int
959 StoreEntry::checkTooSmall()
960 {
961 if (EBIT_TEST(flags, ENTRY_SPECIAL))
962 return 0;
963
964 if (STORE_OK == store_status)
965 if (mem_obj->object_sz < 0 ||
966 mem_obj->object_sz < Config.Store.minObjectSize)
967 return 1;
968 if (getReply()->content_length > -1)
969 if (getReply()->content_length < Config.Store.minObjectSize)
970 return 1;
971 return 0;
972 }
973
974 // TODO: remove checks already performed by swapoutPossible()
975 // TODO: move "too many open..." checks outside -- we are called too early/late
976 int
977 StoreEntry::checkCachable()
978 {
979 #if CACHE_ALL_METHODS
980
981 if (mem_obj->method != METHOD_GET) {
982 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
983 store_check_cachable_hist.no.non_get++;
984 } else
985 #endif
986 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
987 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
988 store_check_cachable_hist.no.wrong_content_length++;
989 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
990 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
991 store_check_cachable_hist.no.not_entry_cachable++;
992 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
993 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
994 store_check_cachable_hist.no.negative_cached++;
995 return 0; /* avoid release call below */
996 } else if ((getReply()->content_length > 0 &&
997 getReply()->content_length
998 > Config.Store.maxObjectSize) ||
999 mem_obj->endOffset() > Config.Store.maxObjectSize) {
1000 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1001 store_check_cachable_hist.no.too_big++;
1002 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
1003 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1004 store_check_cachable_hist.no.too_big++;
1005 } else if (checkTooSmall()) {
1006 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1007 store_check_cachable_hist.no.too_small++;
1008 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1009 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1010 store_check_cachable_hist.no.private_key++;
1011 } else if (swap_status != SWAPOUT_NONE) {
1012 /*
1013 * here we check the swap_status because the remaining
1014 * cases are relevant only if we haven't started swapping
1015 * out the object yet.
1016 */
1017 return 1;
1018 } else if (storeTooManyDiskFilesOpen()) {
1019 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1020 store_check_cachable_hist.no.too_many_open_files++;
1021 } else if (fdNFree() < RESERVED_FD) {
1022 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1023 store_check_cachable_hist.no.too_many_open_fds++;
1024 } else {
1025 store_check_cachable_hist.yes.Default++;
1026 return 1;
1027 }
1028
1029 releaseRequest();
1030 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1031 return 0;
1032 }
1033
1034 void
1035 storeCheckCachableStats(StoreEntry *sentry)
1036 {
1037 storeAppendPrintf(sentry, "Category\t Count\n");
1038
1039 #if CACHE_ALL_METHODS
1040
1041 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1042 store_check_cachable_hist.no.non_get);
1043 #endif
1044
1045 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1046 store_check_cachable_hist.no.not_entry_cachable);
1047 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1048 store_check_cachable_hist.no.wrong_content_length);
1049 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1050 store_check_cachable_hist.no.negative_cached);
1051 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1052 store_check_cachable_hist.no.too_big);
1053 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1054 store_check_cachable_hist.no.too_small);
1055 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1056 store_check_cachable_hist.no.private_key);
1057 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1058 store_check_cachable_hist.no.too_many_open_files);
1059 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1060 store_check_cachable_hist.no.too_many_open_fds);
1061 storeAppendPrintf(sentry, "yes.default\t%d\n",
1062 store_check_cachable_hist.yes.Default);
1063 }
1064
1065 void
1066 StoreEntry::complete()
1067 {
1068 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1069
1070 if (store_status != STORE_PENDING) {
1071 /*
1072 * if we're not STORE_PENDING, then probably we got aborted
1073 * and there should be NO clients on this entry
1074 */
1075 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1076 assert(mem_obj->nclients == 0);
1077 return;
1078 }
1079
1080 /* This is suspect: mem obj offsets include the headers. do we adjust for that
1081 * in use of object_sz?
1082 */
1083 mem_obj->object_sz = mem_obj->endOffset();
1084
1085 store_status = STORE_OK;
1086
1087 assert(mem_status == NOT_IN_MEMORY);
1088
1089 if (!validLength()) {
1090 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1091 releaseRequest();
1092 }
1093
1094 #if USE_CACHE_DIGESTS
1095 if (mem_obj->request)
1096 mem_obj->request->hier.store_complete_stop = current_time;
1097
1098 #endif
1099 /*
1100 * We used to call invokeHandlers, then storeSwapOut. However,
1101 * Madhukar Reddy <myreddy@persistence.com> reported that
1102 * responses without content length would sometimes get released
1103 * in client_side, thinking that the response is incomplete.
1104 */
1105 invokeHandlers();
1106 }
1107
1108 /*
1109 * Someone wants to abort this transfer. Set the reason in the
1110 * request structure, call the server-side callback and mark the
1111 * entry for releasing
1112 */
1113 void
1114 StoreEntry::abort()
1115 {
1116 statCounter.aborted_requests++;
1117 assert(store_status == STORE_PENDING);
1118 assert(mem_obj != NULL);
1119 debugs(20, 6, "storeAbort: " << getMD5Text());
1120
1121 lock(); /* lock while aborting */
1122 negativeCache();
1123
1124 releaseRequest();
1125
1126 EBIT_SET(flags, ENTRY_ABORTED);
1127
1128 setMemStatus(NOT_IN_MEMORY);
1129
1130 store_status = STORE_OK;
1131
1132 /* Notify the server side */
1133
1134 /*
1135 * DPW 2007-05-07
1136 * Should we check abort.data for validity?
1137 */
1138 if (mem_obj->abort.callback) {
1139 if (!cbdataReferenceValid(mem_obj->abort.data))
1140 debugs(20,1,HERE << "queueing event when abort.data is not valid");
1141 eventAdd("mem_obj->abort.callback",
1142 mem_obj->abort.callback,
1143 mem_obj->abort.data,
1144 0.0,
1145 true);
1146 unregisterAbort();
1147 }
1148
1149 /* XXX Should we reverse these two, so that there is no
1150 * unneeded disk swapping triggered?
1151 */
1152 /* Notify the client side */
1153 invokeHandlers();
1154
1155 // abort swap out, invalidating what was created so far (release follows)
1156 swapOutFileClose(StoreIOState::writerGone);
1157
1158 unlock(); /* unlock */
1159 }
1160
1161 /**
1162 * Clear Memory storage to accommodate the given object len
1163 */
1164 void
1165 storeGetMemSpace(int size)
1166 {
1167 PROF_start(storeGetMemSpace);
1168 StoreEntry *e = NULL;
1169 int released = 0;
1170 static time_t last_check = 0;
1171 size_t pages_needed;
1172 RemovalPurgeWalker *walker;
1173
1174 if (squid_curtime == last_check) {
1175 PROF_stop(storeGetMemSpace);
1176 return;
1177 }
1178
1179 last_check = squid_curtime;
1180
1181 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
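/* Rounds up to whole pages: e.g. with a 4 KB SM_PAGE_SIZE (the usual
 * default; assumed here), a 10000-byte write needs 3 pages. */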
1182
1183 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1184 PROF_stop(storeGetMemSpace);
1185 return;
1186 }
1187
1188 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1189 " pages");
1190
1191 /* XXX what to set as max_scan here? */
1192 walker = mem_policy->PurgeInit(mem_policy, 100000);
1193
1194 while ((e = walker->Next(walker))) {
1195 e->purgeMem();
1196 released++;
1197
1198 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1199 break;
1200 }
1201
1202 walker->Done(walker);
1203 debugs(20, 3, "storeGetMemSpace stats:");
1204 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1205 debugs(20, 3, " " << std::setw(6) << released << " were released");
1206 PROF_stop(storeGetMemSpace);
1207 }
1208
1209
1210 /* thunk through to Store::Root().maintain(). Note that this would be better still
1211 * if registered against the root store itself, but that requires more complex
1212 * update logic - bigger fish to fry first. Long term each store when
1213 * it becomes active will self register
1214 */
1215 void
1216 Store::Maintain(void *notused)
1217 {
1218 Store::Root().maintain();
1219
1220 /* Reregister a maintain event .. */
1221 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1222
1223 }
1224
1225 /* The maximum number of objects to scan when maintaining storage space */
1226 #define MAINTAIN_MAX_SCAN 1024
1227 #define MAINTAIN_MAX_REMOVE 64
1228
1229 /*
1230 * This routine is to be called by the main loop in main.c.
1231 * It removes expired objects from only one bucket each time it is called.
1232 *
1233 * This should get called 1/s from main().
1234 */
1235 void
1236 StoreController::maintain()
1237 {
1238 static time_t last_warn_time = 0;
1239
1240 PROF_start(storeMaintainSwapSpace);
1241 swapDir->maintain();
1242
1243 /* this should be emitted by the oversize dir, not globally */
1244
1245 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1246 if (squid_curtime - last_warn_time > 10) {
1247 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1248 << Store::Root().currentSize() / 1024.0 << " KB > "
1249 << (Store::Root().maxSize() >> 10) << " KB");
1250 last_warn_time = squid_curtime;
1251 }
1252 }
1253
1254 PROF_stop(storeMaintainSwapSpace);
1255 }
1256
1257 /* release an object from a cache */
1258 void
1259 StoreEntry::release()
1260 {
1261 PROF_start(storeRelease);
1262 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1263 /* If, for any reason we can't discard this object because of an
1264 * outstanding request, mark it for pending release */
1265
1266 if (locked()) {
1267 expireNow();
1268 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1269 releaseRequest();
1270 PROF_stop(storeRelease);
1271 return;
1272 }
1273
1274 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1275 setPrivateKey();
1276
1277 if (mem_obj)
1278 destroyMemObject();
1279
1280 if (swap_filen > -1) {
1281 /*
1282 * Fake a call to StoreEntry->lock() When rebuilding is done,
1283 * we'll just call StoreEntry->unlock() on these.
1284 */
1285 lock_count++;
1286 setReleaseFlag();
1287 LateReleaseStack.push_back(this);
1288 } else {
1289 destroyStoreEntry(static_cast<hash_link *>(this));
1290 // "this" is no longer valid
1291 }
1292
1293 PROF_stop(storeRelease);
1294 return;
1295 }
1296
1297 storeLog(STORE_LOG_RELEASE, this);
1298
1299 if (swap_filen > -1) {
1300 // log before unlink() below clears swap_filen
1301 if (!EBIT_TEST(flags, KEY_PRIVATE))
1302 storeDirSwapLog(this, SWAP_LOG_DEL);
1303
1304 unlink();
1305 }
1306
1307 setMemStatus(NOT_IN_MEMORY);
1308 destroyStoreEntry(static_cast<hash_link *>(this));
1309 PROF_stop(storeRelease);
1310 }
1311
1312 static void
1313 storeLateRelease(void *unused)
1314 {
1315 StoreEntry *e;
1316 int i;
1317 static int n = 0;
1318
1319 if (StoreController::store_dirs_rebuilding) {
1320 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1321 return;
1322 }
1323
1324 for (i = 0; i < 10; i++) {
1325 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1326
1327 if (e == NULL) {
1328 /* done! */
1329 debugs(20, 1, "storeLateRelease: released " << n << " objects");
1330 return;
1331 }
1332
1333 e->unlock();
1334 n++;
1335 }
1336
1337 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1338 }
1339
1340 /* return 1 if a store entry is locked */
1341 int
1342 StoreEntry::locked() const
1343 {
1344 if (lock_count)
1345 return 1;
1346
1347 if (swap_status == SWAPOUT_WRITING)
1348 return 1;
1349
1350 if (store_status == STORE_PENDING)
1351 return 1;
1352
1353 /*
1354 * SPECIAL, PUBLIC entries should be "locked"
1355 */
1356 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1357 if (!EBIT_TEST(flags, KEY_PRIVATE))
1358 return 1;
1359
1360 return 0;
1361 }
1362
1363 bool
1364 StoreEntry::validLength() const
1365 {
1366 int64_t diff;
1367 const HttpReply *reply;
1368 assert(mem_obj != NULL);
1369 reply = getReply();
1370 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1371 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1372 objectLen());
1373 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1374 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1375
1376 if (reply->content_length < 0) {
1377 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1378 return 1;
1379 }
1380
1381 if (reply->hdr_sz == 0) {
1382 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1383 return 1;
1384 }
1385
1386 if (mem_obj->method == METHOD_HEAD) {
1387 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1388 return 1;
1389 }
1390
1391 if (reply->sline.status == HTTP_NOT_MODIFIED)
1392 return 1;
1393
1394 if (reply->sline.status == HTTP_NO_CONTENT)
1395 return 1;
1396
1397 diff = reply->hdr_sz + reply->content_length - objectLen();
1398
1399 if (diff == 0)
1400 return 1;
1401
1402 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1403
1404 return 0;
1405 }
1406
1407 static void
1408 storeRegisterWithCacheManager(void)
1409 {
1410 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1411 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1412 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1413 storeCheckCachableStats, 0, 1);
1414 }
1415
1416 void
1417 storeInit(void)
1418 {
1419 storeKeyInit();
1420 mem_policy = createRemovalPolicy(Config.memPolicy);
1421 storeDigestInit();
1422 storeLogOpen();
1423 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1424 Store::Root().init();
1425 storeRebuildStart();
1426
1427 storeRegisterWithCacheManager();
1428 }
1429
1430 void
1431 storeConfigure(void)
1432 {
1433 store_swap_high = (long) (((float) Store::Root().maxSize() *
1434 (float) Config.Swap.highWaterMark) / (float) 100);
1435 store_swap_low = (long) (((float) Store::Root().maxSize() *
1436 (float) Config.Swap.lowWaterMark) / (float) 100);
1437 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1438 }
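/*
 * Worked example (all numbers assumed, for illustration): with 10 GB of
 * total cache_dir capacity and the stock cache_swap_low/high watermarks of
 * 90/95, the low and high marks land at 9 GB and 9.5 GB of on-disk usage;
 * with cache_mem of 256 MB and mem_node pages of roughly 4 KB,
 * store_pages_max comes out near 65536 in-memory pages.
 */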
1439
1440 bool
1441 StoreEntry::memoryCachable() const
1442 {
1443 if (mem_obj == NULL)
1444 return 0;
1445
1446 if (mem_obj->data_hdr.size() == 0)
1447 return 0;
1448
1449 if (mem_obj->inmem_lo != 0)
1450 return 0;
1451
1452 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1453 return 0;
1454
1455 if (Config.memShared && IamWorkerProcess()) {
1456 const int64_t expectedSize = mem_obj->expectedReplySize();
1457 // objects of unknown size are not allowed into memory cache, for now
1458 if (expectedSize < 0 ||
1459 expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
1460 return 0;
1461 }
1462
1463 return 1;
1464 }
1465
1466 int
1467 StoreEntry::checkNegativeHit() const
1468 {
1469 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1470 return 0;
1471
1472 if (expires <= squid_curtime)
1473 return 0;
1474
1475 if (store_status != STORE_OK)
1476 return 0;
1477
1478 return 1;
1479 }
1480
1481 /**
1482 * Set object for negative caching.
1483 * Preserves any expiry information given by the server.
1484 * In absence of proper expiry info it will set to expire immediately,
1485 * or with HTTP-violations enabled the configured negative-TTL is observed
1486 */
1487 void
1488 StoreEntry::negativeCache()
1489 {
1490 // XXX: should make the default for expires 0 instead of -1
1491 // so we can distinguish "Expires: -1" from nothing.
1492 if (expires <= 0)
1493 #if USE_HTTP_VIOLATIONS
1494 expires = squid_curtime + Config.negativeTtl;
1495 #else
1496 expires = squid_curtime;
1497 #endif
1498 EBIT_SET(flags, ENTRY_NEGCACHED);
1499 }
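/*
 * Example (assumed configuration): with HTTP violations compiled in and
 * "negative_ttl 30 seconds" in squid.conf, an error reply carrying no usable
 * expiry information becomes a negative-cache hit for up to 30 seconds; in a
 * strict build it expires immediately instead.
 */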
1500
1501 void
1502 storeFreeMemory(void)
1503 {
1504 Store::Root(NULL);
1505 #if USE_CACHE_DIGESTS
1506
1507 if (store_digest)
1508 cacheDigestDestroy(store_digest);
1509
1510 #endif
1511
1512 store_digest = NULL;
1513 }
1514
1515 int
1516 expiresMoreThan(time_t expires, time_t when)
1517 {
1518 if (expires < 0) /* No Expires given */
1519 return 1;
1520
1521 return (expires > (squid_curtime + when));
1522 }
1523
1524 int
1525 StoreEntry::validToSend() const
1526 {
1527 if (EBIT_TEST(flags, RELEASE_REQUEST))
1528 return 0;
1529
1530 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1531 if (expires <= squid_curtime)
1532 return 0;
1533
1534 if (EBIT_TEST(flags, ENTRY_ABORTED))
1535 return 0;
1536
1537 return 1;
1538 }
1539
1540 void
1541 StoreEntry::timestampsSet()
1542 {
1543 const HttpReply *reply = getReply();
1544 time_t served_date = reply->date;
1545 int age = reply->header.getInt(HDR_AGE);
1546 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1547 /* make sure that 0 <= served_date <= squid_curtime */
1548
1549 if (served_date < 0 || served_date > squid_curtime)
1550 served_date = squid_curtime;
1551
1552 /* Bug 1791:
1553 * If the returned Date: is more than 24 hours older than
1554 * the squid_curtime, then one of us needs to use NTP to set our
1555 * clock. We'll pretend that our clock is right.
1556 */
1557 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1558 served_date = squid_curtime;
1559
1560 /*
1561 * Compensate with Age header if origin server clock is ahead
1562 * of us and there is a cache in between us and the origin
1563 * server. But DONT compensate if the age value is larger than
1564 * squid_curtime because it results in a negative served_date.
1565 */
1566 if (age > squid_curtime - served_date)
1567 if (squid_curtime > age)
1568 served_date = squid_curtime - age;
1569
1570 // compensate for Squid-to-server and server-to-Squid delays
1571 if (mem_obj && mem_obj->request) {
1572 const time_t request_sent =
1573 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1574 if (0 < request_sent && request_sent < squid_curtime)
1575 served_date -= (squid_curtime - request_sent);
1576 }
1577
1578 if (reply->expires > 0 && reply->date > -1)
1579 expires = served_date + (reply->expires - reply->date);
1580 else
1581 expires = reply->expires;
1582
1583 lastmod = reply->last_modified;
1584
1585 timestamp = served_date;
1586 }
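/*
 * Worked example of the clock handling above (all values assumed):
 * squid_curtime = 1000, reply Date = 1005 (origin clock 5 seconds ahead),
 * Age = 7. served_date is first clamped down to 1000, then the Age test
 * (7 > 1000 - 1000) rewinds it to 1000 - 7 = 993; any measured
 * request-to-origin delay is subtracted on top of that before the timestamp
 * and expiry are finally derived.
 */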
1587
1588 void
1589 StoreEntry::registerAbort(STABH * cb, void *data)
1590 {
1591 assert(mem_obj);
1592 assert(mem_obj->abort.callback == NULL);
1593 mem_obj->abort.callback = cb;
1594 mem_obj->abort.data = cbdataReference(data);
1595 }
1596
1597 void
1598 StoreEntry::unregisterAbort()
1599 {
1600 assert(mem_obj);
1601 if (mem_obj->abort.callback) {
1602 mem_obj->abort.callback = NULL;
1603 cbdataReferenceDone(mem_obj->abort.data);
1604 }
1605 }
1606
1607 void
1608 StoreEntry::dump(int l) const
1609 {
1610 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1611 debugs(20, l, "StoreEntry->next: " << next);
1612 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1613 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1614 debugs(20, l, "StoreEntry->lastref: " << lastref);
1615 debugs(20, l, "StoreEntry->expires: " << expires);
1616 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1617 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1618 debugs(20, l, "StoreEntry->refcount: " << refcount);
1619 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1620 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1621 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1622 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1623 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1624 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1625 debugs(20, l, "StoreEntry->store_status: " << store_status);
1626 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1627 }
1628
1629 /*
1630 * NOTE, this function assumes only two mem states
1631 */
1632 void
1633 StoreEntry::setMemStatus(mem_status_t new_status)
1634 {
1635 if (new_status == mem_status)
1636 return;
1637
1638 // are we using a shared memory cache?
1639 if (Config.memShared && IamWorkerProcess()) {
1640 assert(new_status != IN_MEMORY); // we do not call this otherwise
1641 // This method was designed to update replacement policy, not to
1642 // actually purge something from the memory cache (TODO: rename?).
1643 // Shared memory cache does not have a policy that needs updates.
1644 mem_status = new_status;
1645 return;
1646 }
1647
1648 assert(mem_obj != NULL);
1649
1650 if (new_status == IN_MEMORY) {
1651 assert(mem_obj->inmem_lo == 0);
1652
1653 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1654 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1655 } else {
1656 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1657 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1658 }
1659
1660 hot_obj_count++; // TODO: maintain for the shared hot cache as well
1661 } else {
1662 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1663 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1664 } else {
1665 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1666 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1667 }
1668
1669 hot_obj_count--;
1670 }
1671
1672 mem_status = new_status;
1673 }
1674
1675 const char *
1676 StoreEntry::url() const
1677 {
1678 if (this == NULL)
1679 return "[null_entry]";
1680 else if (mem_obj == NULL)
1681 return "[null_mem_obj]";
1682 else
1683 return mem_obj->url;
1684 }
1685
1686 void
1687 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1688 {
1689 if (mem_obj)
1690 return;
1691
1692 if (hidden_mem_obj) {
1693 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1694 mem_obj = hidden_mem_obj;
1695 hidden_mem_obj = NULL;
1696 mem_obj->resetUrls(aUrl, aLogUrl);
1697 return;
1698 }
1699
1700 mem_obj = new MemObject(aUrl, aLogUrl);
1701 }
1702
1703 /* this just sets DELAY_SENDING */
1704 void
1705 StoreEntry::buffer()
1706 {
1707 EBIT_SET(flags, DELAY_SENDING);
1708 }
1709
1710 /* this just clears DELAY_SENDING and Invokes the handlers */
1711 void
1712 StoreEntry::flush()
1713 {
1714 if (EBIT_TEST(flags, DELAY_SENDING)) {
1715 EBIT_CLR(flags, DELAY_SENDING);
1716 invokeHandlers();
1717 }
1718 }
1719
1720 int64_t
1721 StoreEntry::objectLen() const
1722 {
1723 assert(mem_obj != NULL);
1724 return mem_obj->object_sz;
1725 }
1726
1727 int64_t
1728 StoreEntry::contentLen() const
1729 {
1730 assert(mem_obj != NULL);
1731 assert(getReply() != NULL);
1732 return objectLen() - getReply()->hdr_sz;
1733 }
1734
1735 HttpReply const *
1736 StoreEntry::getReply () const
1737 {
1738 if (NULL == mem_obj)
1739 return NULL;
1740
1741 return mem_obj->getReply();
1742 }
1743
1744 void
1745 StoreEntry::reset()
1746 {
1747 assert (mem_obj);
1748 debugs(20, 3, "StoreEntry::reset: " << url());
1749 mem_obj->reset();
1750 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1751 rep->reset();
1752 expires = lastmod = timestamp = -1;
1753 }
1754
1755 /*
1756 * storeFsInit
1757 *
1758 * This routine calls the SETUP routine for each fs type.
1759 * I don't know where the best place for this is, and I'm not going to shuffle
1760 * around large chunks of code right now (that can be done once it's working).
1761 */
1762 void
1763 storeFsInit(void)
1764 {
1765 storeReplSetup();
1766 }
1767
1768 /*
1769 * called to add another store removal policy module
1770 */
1771 void
1772 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1773 {
1774 int i;
1775
1776 /* find the number of currently known repl types */
1777 for (i = 0; storerepl_list && storerepl_list[i].typestr; i++) {
1778 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1779 debugs(20, 1, "WARNING: Trying to load store replacement policy " << type << " twice.");
1780 return;
1781 }
1782 }
1783
1784 /* add the new type */
1785 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1786
1787 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1788
1789 storerepl_list[i].typestr = type;
1790
1791 storerepl_list[i].create = create;
1792 }
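/*
 * Registration sketch (the exact entry points are generated per policy
 * module at build time; the function name below is illustrative only):
 *
 *   storeReplAdd("lru", createRemovalPolicy_lru);
 */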
1793
1794 /*
1795 * Create a removal policy instance
1796 */
1797 RemovalPolicy *
1798 createRemovalPolicy(RemovalPolicySettings * settings)
1799 {
1800 storerepl_entry_t *r;
1801
1802 for (r = storerepl_list; r && r->typestr; r++) {
1803 if (strcmp(r->typestr, settings->type) == 0)
1804 return r->create(settings->args);
1805 }
1806
1807 debugs(20, 1, "ERROR: Unknown policy " << settings->type);
1808 debugs(20, 1, "ERROR: Be sure to have set cache_replacement_policy");
1809 debugs(20, 1, "ERROR: and memory_replacement_policy in squid.conf!");
1810 fatalf("ERROR: Unknown policy %s\n", settings->type);
1811 return NULL; /* NOTREACHED */
1812 }
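/*
 * The settings argument originates from squid.conf, e.g. (illustrative):
 *
 *   memory_replacement_policy lru
 *   cache_replacement_policy heap LFUDA
 *
 * An unknown policy name is a fatal configuration error, as above.
 */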
1813
1814 #if 0
1815 void
1816 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1817 {
1818 if (e->swap_file_number == filn)
1819 return;
1820
1821 if (filn < 0) {
1822 assert(-1 == filn);
1823 storeDirMapBitReset(e->swap_file_number);
1824 storeDirLRUDelete(e);
1825 e->swap_file_number = -1;
1826 } else {
1827 assert(-1 == e->swap_file_number);
1828 storeDirMapBitSet(e->swap_file_number = filn);
1829 storeDirLRUAdd(e);
1830 }
1831 }
1832
1833 #endif
1834
1835
1836 /*
1837 * Replace a store entry with
1838 * a new reply. This eats the reply.
1839 */
1840 void
1841 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1842 {
1843 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1844
1845 if (!mem_obj) {
1846 debugs(20, 0, "Attempt to replace object with no in-memory representation");
1847 return;
1848 }
1849
1850 mem_obj->replaceHttpReply(rep);
1851
1852 if (andStartWriting)
1853 startWriting();
1854 }
1855
1856
1857 void
1858 StoreEntry::startWriting()
1859 {
1860 Packer p;
1861
1862 /* TODO: when we store headers separately, remove the header portion */
1863 /* TODO: mark the length of the headers ? */
1864 /* We ONLY want the headers */
1865 packerToStoreInit(&p, this);
1866
1867 assert (isEmpty());
1868 assert(mem_obj);
1869
1870 const HttpReply *rep = getReply();
1871 assert(rep);
1872
1873 rep->packHeadersInto(&p);
1874 mem_obj->markEndOfReplyHeaders();
1875
1876 rep->body.packInto(&p);
1877
1878 packerClean(&p);
1879 }
1880
1881
1882 char const *
1883 StoreEntry::getSerialisedMetaData()
1884 {
1885 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1886 int swap_hdr_sz;
1887 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1888 storeSwapTLVFree(tlv_list);
1889 assert (swap_hdr_sz >= 0);
1890 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1891 return result;
1892 }
1893
1894 void
1895 StoreEntry::trimMemory(const bool preserveSwappable)
1896 {
1897 /*
1898 * DPW 2007-05-09
1899 * Bug #1943. We must not let go of any data for IN_MEMORY
1900 * objects. We have to wait until the mem_status changes.
1901 */
1902 if (mem_status == IN_MEMORY)
1903 return;
1904
1905 if (!preserveSwappable) {
1906 if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
1907 /* Nothing to do */
1908 return;
1909 }
1910 /*
1911 * It's not swappable, and we're about to delete a chunk,
1912 * so we must make it PRIVATE. This is tricky/ugly because
1913 * for the most part, we treat swappable == cachable here.
1914 */
1915 releaseRequest();
1916 mem_obj->trimUnSwappable ();
1917 } else {
1918 mem_obj->trimSwappable ();
1919 }
1920 }
1921
1922 bool
1923 StoreEntry::modifiedSince(HttpRequest * request) const
1924 {
1925 int object_length;
1926 time_t mod_time = lastmod;
1927
1928 if (mod_time < 0)
1929 mod_time = timestamp;
1930
1931 debugs(88, 3, "modifiedSince: '" << url() << "'");
1932
1933 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1934
1935 if (mod_time < 0)
1936 return true;
1937
1938 /* Find size of the object */
1939 object_length = getReply()->content_length;
1940
1941 if (object_length < 0)
1942 object_length = contentLen();
1943
1944 if (mod_time > request->ims) {
1945 debugs(88, 3, "--> YES: entry newer than client");
1946 return true;
1947 } else if (mod_time < request->ims) {
1948 debugs(88, 3, "--> NO: entry older than client");
1949 return false;
1950 } else if (request->imslen < 0) {
1951 debugs(88, 3, "--> NO: same LMT, no client length");
1952 return false;
1953 } else if (request->imslen == object_length) {
1954 debugs(88, 3, "--> NO: same LMT, same length");
1955 return false;
1956 } else {
1957 debugs(88, 3, "--> YES: same LMT, different length");
1958 return true;
1959 }
1960 }
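/*
 * Example (assumed request): "If-Modified-Since: <same LMT>; length=1024".
 * With an equal modification time, the non-standard length= hint is compared
 * against the stored content length, so a size change still forces a fresh
 * reply even though the timestamps match.
 */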
1961
1962 bool
1963 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1964 {
1965 const String reqETags = request.header.getList(HDR_IF_MATCH);
1966 return hasOneOfEtags(reqETags, false);
1967 }
1968
1969 bool
1970 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1971 {
1972 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1973 // weak comparison is allowed only for HEAD or full-body GET requests
1974 const bool allowWeakMatch = !request.flags.range &&
1975 (request.method == METHOD_GET || request.method == METHOD_HEAD);
1976 return hasOneOfEtags(reqETags, allowWeakMatch);
1977 }
1978
1979 /// whether at least one of the request ETags matches entity ETag
1980 bool
1981 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1982 {
1983 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1984 if (!repETag.str)
1985 return strListIsMember(&reqETags, "*", ',');
1986
1987 bool matched = false;
1988 const char *pos = NULL;
1989 const char *item;
1990 int ilen;
1991 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1992 if (!strncmp(item, "*", ilen))
1993 matched = true;
1994 else {
1995 String str;
1996 str.append(item, ilen);
1997 ETag reqETag;
1998 if (etagParseInit(&reqETag, str.termedBuf())) {
1999 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
2000 etagIsStrongEqual(repETag, reqETag);
2001 }
2002 }
2003 }
2004 return matched;
2005 }
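/*
 * Example (assumed header): If-None-Match: "abc", W/"xyz". Each list item is
 * parsed and compared against the stored ETag; W/"xyz" can only match when
 * weak comparison is allowed (HEAD or a full-body GET), while a bare "*"
 * matches any existing entity.
 */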
2006
2007 SwapDir::Pointer
2008 StoreEntry::store() const
2009 {
2010 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2011 return INDEXSD(swap_dirn);
2012 }
2013
2014 void
2015 StoreEntry::unlink()
2016 {
2017 store()->unlink(*this); // implies disconnect()
2018 swap_filen = -1;
2019 swap_dirn = -1;
2020 swap_status = SWAPOUT_NONE;
2021 }
2022
2023 /*
2024 * return true if the entry is in a state where
2025 * it can accept more data (ie with write() method)
2026 */
2027 bool
2028 StoreEntry::isAccepting() const
2029 {
2030 if (STORE_PENDING != store_status)
2031 return false;
2032
2033 if (EBIT_TEST(flags, ENTRY_ABORTED))
2034 return false;
2035
2036 return true;
2037 }
2038
2039 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2040 {
2041 return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
2042 e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
2043 e.swap_status;
2044 }
2045
2046 /* NullStoreEntry */
2047
2048 NullStoreEntry NullStoreEntry::_instance;
2049
2050 NullStoreEntry *
2051 NullStoreEntry::getInstance()
2052 {
2053 return &_instance;
2054 }
2055
2056 char const *
2057 NullStoreEntry::getMD5Text() const
2058 {
2059 return "N/A";
2060 }
2061
2062 void
2063 NullStoreEntry::operator delete(void*)
2064 {
2065 fatal ("Attempt to delete NullStoreEntry\n");
2066 }
2067
2068 char const *
2069 NullStoreEntry::getSerialisedMetaData()
2070 {
2071 return NULL;
2072 }
2073
2074 #if !_USE_INLINE_
2075 #include "Store.cci"
2076 #endif