]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Changed increment operators from postfix to prefix form.
[thirdparty/squid.git] / src / store.cc
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid-old.h"
37 #include "CacheManager.h"
38 #include "comm/Connection.h"
39 #include "ETag.h"
40 #include "event.h"
41 #include "fde.h"
42 #include "Store.h"
43 #include "mgr/Registration.h"
44 #include "StoreClient.h"
45 #include "stmem.h"
46 #include "HttpReply.h"
47 #include "HttpRequest.h"
48 #include "MemObject.h"
49 #include "mem_node.h"
50 #include "StatCounters.h"
51 #include "StoreMeta.h"
52 #include "SwapDir.h"
53 #include "StoreIOState.h"
54 #if USE_DELAY_POOLS
55 #include "DelayPools.h"
56 #endif
57 #include "Stack.h"
58 #include "SquidTime.h"
59 #include "swap_log_op.h"
60 #include "mgr/StoreIoAction.h"
61
62 static STMCB storeWriteComplete;
63
64 #define REBUILD_TIMESTAMP_DELTA_MAX 2
65
66 #define STORE_IN_MEM_BUCKETS (229)
67
68
69 /** \todo Convert these string constants to enum string-arrays generated */
70
/* Human-readable names for the store status enums; indexed directly by
 * the corresponding enum value, so order must match the enum declarations. */
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
92
93
94 /*
95 * This defines an repl type
96 */
97
/* Registry record for a removal (replacement) policy implementation:
 * maps a policy name string to its factory function. */
typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;          /* policy name as used in squid.conf */
    REMOVALPOLICYCREATE *create;  /* factory for instances of this policy */
};

/* NULL until the first policy registers itself */
static storerepl_entry_t *storerepl_list = NULL;
106
107
108 /*
109 * local function prototypes
110 */
/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
/* entries whose release had to be postponed (see storeLateRelease) */
static Stack<StoreEntry*> LateReleaseStack;
/* memory pool backing StoreEntry::operator new/delete; lazily created */
MemAllocator *StoreEntry::pool = NULL;

/* the process-wide store root; set via Store::Root(aRoot) */
StorePointer Store::CurrentRoot = NULL;
122
/* Install aRoot as the process-wide store root (raw-pointer overload). */
void
Store::Root(Store * aRoot)
{
    CurrentRoot = aRoot;
}
128
/* Smart-pointer overload; delegates to the raw-pointer setter. */
void
Store::Root(StorePointer aRoot)
{
    Root(aRoot.getRaw());
}
134
/* Append root-store statistics to the given (non-NULL) output entry. */
void
Store::Stats(StoreEntry * output)
{
    assert (output);
    Root().stat(*output);
}
141
/* Default no-op; concrete stores override to build their on-disk structures. */
void
Store::create()
{}
145
/* Default no-op disk-full notification; overridden by disk-backed stores. */
void
Store::diskFull()
{}
149
/* Default no-op; overridden by stores that buffer state needing a flush. */
void
Store::sync()
{}
153
/* Base-class unlink is invalid: only concrete stores can remove entries.
 * Reaching this indicates a dispatch bug, hence the fatal(). */
void
Store::unlink (StoreEntry &anEntry)
{
    fatal("Store::unlink on invalid Store\n");
}
159
/* Pool-backed allocation for StoreEntry objects. The pool is created
 * lazily on first use; all entries share it (see inUseCount()). */
void *
StoreEntry::operator new (size_t bytecount)
{
    /* this allocator only serves full StoreEntry objects */
    assert (bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
        pool->setChunkSize(2048 * 1024);  /* 2MB chunks */
    }

    return pool->alloc();
}
172
/* Return a StoreEntry's storage to the shared pool (pool must exist,
 * since delete implies a prior operator new created it). */
void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
178
/* Promote the entry to a shareable public key, but only if it is still
 * marked cachable (releaseRequest() clears ENTRY_CACHABLE). */
void
StoreEntry::makePublic()
{
    /* This object can be cached for a long time */

    if (EBIT_TEST(flags, ENTRY_CACHABLE))
        setPublicKey();
}
187
/* Mark the entry as never-cachable: expire it immediately and schedule
 * its release once no client is using it. */
void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest();       /* delete object when not used */
    /* releaseRequest clears ENTRY_CACHABLE flag */
}
196
/* Cache an error/negative response for the configured short TTL, and
 * publish it if the entry is still cachable. */
void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();

    if (EBIT_TEST(flags, ENTRY_CACHABLE))
        setPublicKey();
}
206
/* Number of live StoreEntry objects, as tracked by the allocation pool.
 * Zero if the pool was never created (no entry ever allocated). */
size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}
214
/* Render this entry's cache key as hex text (for logs/debugging).
 * NOTE: storeKeyText() returns a shared static buffer -- use immediately. */
const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}
220
221 #include "comm.h"
222
/* C-style trampoline used by the deferred-read machinery: unpacks the
 * StoreEntry context and retries the delay-aware read. */
void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}
232
/* Schedule a read of up to len bytes from conn, honoring the read-ahead
 * policy and (when built with delay pools) per-client bandwidth limits.
 * If no bytes may be read now, the read is queued as a DeferredRead that
 * re-enters this function via DeferReader when conditions change. */
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            /* blocked by the read-ahead window: defer until data drains */
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit: read-ahead would allow it, so the delay pool
         * must be what returned 0 -- queue on the most permissive pool */
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
273
/* How many bytes (up to aRange.end) we are willing to accept right now.
 * Without a MemObject there is nothing limiting us, so allow the maximum;
 * otherwise the read-ahead policy can veto (return 0) and the MemObject
 * decides the final amount (optionally ignoring delay pools). */
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
291
/* True when reads should be deferred (we currently want zero bytes).
 * NOTE: the fd argument is unused; the decision is purely entry-local. */
bool
StoreEntry::checkDeferRead(int fd) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}
297
/* Propagate the no-delay (delay-pool exempt) setting to our MemObject,
 * if we have one; a no-op otherwise. */
void
StoreEntry::setNoDelay (bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}
304
/* Classify a new store client as memory-fed or disk-fed. The ordered
 * checks below encode which data source can actually satisfy the client,
 * so their sequence is significant -- do not reorder casually. */
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
        // clients of an object cached in shared memory are memory clients
        return STORE_MEM_CLIENT;
    }

    assert(mem_obj);

    /* inmem_lo > 0 means the start of the object was already trimmed
     * from memory, so the client must read from disk */
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, 1, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
370
/* Default constructor: an entry with no MemObject, no timestamps, and
 * no swap (disk) location yet. */
StoreEntry::StoreEntry():
        hidden_mem_obj(NULL),
        swap_file_sz(0)
{
    debugs(20, 3, HERE << "new StoreEntry " << this);
    mem_obj = NULL;

    /* -1 means "unknown/not set" for all time fields */
    expires = lastmod = lastref = timestamp = -1;

    swap_status = SWAPOUT_NONE;
    swap_filen = -1;
    swap_dirn = -1;
}
384
/* URL constructor: like the default one, but immediately attaches a
 * fresh MemObject for the given request and log URLs. */
StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
        hidden_mem_obj(NULL),
        swap_file_sz(0)
{
    debugs(20, 3, HERE << "new StoreEntry " << this);
    mem_obj = new MemObject(aUrl, aLogUrl);

    /* -1 means "unknown/not set" for all time fields */
    expires = lastmod = lastref = timestamp = -1;

    swap_status = SWAPOUT_NONE;
    swap_filen = -1;
    swap_dirn = -1;
}
398
/* Destructor: detach from the SwapDir if we hold a disk slot, and free
 * any MemObject stashed away by hideMemObject(). */
StoreEntry::~StoreEntry()
{
    if (swap_filen >= 0) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*store());
        sd.disconnect(*this);
    }
    delete hidden_mem_obj;
}
407
408 #if USE_ADAPTATION
/* Remember a producer callback to be scheduled later by kickProducer().
 * Only the first producer is kept; subsequent calls just log a conflict. */
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}
418
/* Schedule the deferred producer callback (if any) and clear it so it
 * fires at most once. */
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
427 #endif
428
/* Drop the in-memory copy of the object: both the active MemObject and
 * any hidden one. mem_obj is NULLed before deletion so code reached
 * during destruction never sees a half-dead MemObject. */
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
    setMemStatus(NOT_IN_MEMORY);
    MemObject *mem = mem_obj;
    mem_obj = NULL;
    delete mem;
    delete hidden_mem_obj;
    hidden_mem_obj = NULL;
}
440
/* Stash the current MemObject in hidden_mem_obj (freed later by
 * destroyMemObject() or the destructor) and detach it from the entry.
 * Requires a MemObject to hide and no previously hidden one. */
void
StoreEntry::hideMemObject()
{
    debugs(20, 3, HERE << "hiding " << mem_obj);
    assert(mem_obj);
    assert(!hidden_mem_obj);
    hidden_mem_obj = mem_obj;
    mem_obj = NULL;
}
450
/* Hash-table destructor callback: tear down a StoreEntry reached via its
 * hash_link. The shared NullStoreEntry singleton is never destroyed. */
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    /* upcast through hash_link because that is how the table stores us */
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    e->destroyMemObject();

    e->hashDelete();

    /* hashDelete() must have freed and cleared the key */
    assert(e->key == NULL);

    delete e;
}
469
470 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
471
/* Store a private copy of someKey on the entry and link it into the
 * global store_table under that key. */
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}
479
/* Unlink from store_table and release our duplicated key; leaves
 * key == NULL (callers such as destroyStoreEntry assert this). */
void
StoreEntry::hashDelete()
{
    hash_remove_link(store_table, this);
    storeKeyFree((const cache_key *)key);
    key = NULL;
}
487
488 /* -------------------------------------------------------------------------- */
489
490
491 /* get rid of memory copy of the object */
/* Free the in-memory copy of the object. If the object was not fully
 * swapped out, the memory copy was the only complete copy, so the whole
 * entry must be released. */
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    destroyMemObject();

    if (swap_status != SWAPOUT_DONE)
        release();
}
505
506 /* RBC 20050104 this is wrong- memory ref counting
507 * is not at all equivalent to the store 'usage' concept
508 * which the replacement policies should be acting upon.
509 * specifically, object iteration within stores needs
510 * memory ref counting to prevent race conditions,
511 * but this should not influence store replacement.
512 */
/* Take a reference on the entry, refresh its last-reference time, and
 * tell the replacement policy it was touched. */
void
StoreEntry::lock()
{
    ++lock_count;
    debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
           lock_count  );
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
523
/* Set RELEASE_REQUEST (idempotent). Unlike releaseRequest(), this does
 * NOT clear ENTRY_CACHABLE or force a private key. */
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);
}
534
/* Request eventual release of the entry: flag it, make it uncachable,
 * and move it to a private key so no new client can find it. */
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag();

    /*
     * Clear cachable flag here because we might get called before
     * anyone else even looks at the cachability flag.  Also, this
     * prevents httpMakePublic from really setting a public key.
     */
    EBIT_CLR(flags, ENTRY_CACHABLE);

    setPrivateKey();
}
552
553 /* unlock object, return -1 if object get released after unlock
554 * otherwise lock_count */
555 int
556 StoreEntry::unlock()
557 {
558 lock_count--;
559 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
560
561 if (lock_count)
562 return (int) lock_count;
563
564 if (store_status == STORE_PENDING)
565 setReleaseFlag();
566
567 assert(storePendingNClients(this) == 0);
568
569 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
570 this->release();
571 return 0;
572 }
573
574 if (EBIT_TEST(flags, KEY_PRIVATE))
575 debugs(20, 1, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
576
577 Store::Root().handleIdleEntry(*this); // may delete us
578 return 0;
579 }
580
/* Look up the public entry for (request, method) and notify aClient via
 * its created() callback; a miss is reported as the NullStoreEntry. */
void
StoreEntry::getPublicByRequestMethod  (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequestMethod( request, method);

    if (!result)
        aClient->created (NullStoreEntry::getInstance());
    else
        aClient->created (result);
}
592
/* Look up the public entry for the request (with HEAD->GET fallback, see
 * storeGetPublicByRequest) and report it -- or NullStoreEntry -- to aClient. */
void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
604
/* Look up the public entry for (uri, method) and report it -- or
 * NullStoreEntry on a miss -- to aClient via created(). */
void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
616
/* Fetch the store entry under the public key derived from uri+method,
 * or NULL if not cached. */
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}
622
/* Like storeGetPublic(), but keys on the full request (covers Vary etc.)
 * with an explicit method override. */
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
}
628
/* Request-keyed public lookup. A HEAD miss falls back to the cached GET
 * object, since a HEAD reply can be synthesized from it. */
StoreEntry *
storeGetPublicByRequest(HttpRequest * req)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);

    if (e == NULL && req->method == METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, METHOD_GET);

    return e;
}
640
/* Produce the next id for private cache keys: a monotonically increasing
 * positive int that restarts at 1 if the signed counter ever wraps. */
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;

    if (key_counter < 0)        /* wrapped past INT_MAX: restart at 1 */
        key_counter = 1;

    return key_counter;
}
651
652 /* RBC 20050104 AFAICT this should become simpler:
653 * rather than reinserting with a special key it should be marked
654 * as 'released' and then cleaned up when refcounting indicates.
655 * the StoreHashIndex could well implement its 'released' in the
656 * current manner.
657 * Also, clean log writing should skip over ia,t
658 * Otherwise, we need a 'remove from the index but not the store
659 * concept'.
660 */
/* Re-key the entry under a unique private key so no other client can
 * find it. Any existing public key (and its swap-log record) is removed
 * first. Idempotent for already-private entries. */
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already private */

    if (key) {
        /* a disk copy exists: log the deletion of its public identity */
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj != NULL) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
    } else {
        /* no MemObject to derive a URL from; use a placeholder key */
        newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
    }

    /* private keys are unique by construction, so no collision expected */
    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
687
/* Re-key the entry under its public (shareable) key, handling Vary:
 * bookkeeping along the way. May create or release a "vary marker" base
 * object, and evicts any existing entry that holds the target key.
 * Idempotent for already-public entries. */
void
StoreEntry::setPublicKey()
{
    StoreEntry *e2 = NULL;
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public.  Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object.  If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
     * be set, and StoreEntry::setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, 1, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers);       /* free old "bad" variance key */
                StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);

                if (pe)
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
            /* Create "vary" base object: an internal marker reply that
             * records which headers this URL varies on */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.defined()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep);

            pe->timestampsSet();

            pe->makePublic();

            pe->complete();

            pe->unlock();
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->url, mem_obj->method);

    if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
        /* the target key is taken: retire the old occupant, then
         * recompute our key (setPrivateKey above may have churned state) */
        debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    /* a disk copy exists: log the addition of its new public identity */
    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
808
/* Create and initialize a new STORE_PENDING entry for the given URL.
 * The returned entry is already locked once (caller owns that lock) and
 * keyed privately or publicly depending on hierarchy settings. */
StoreEntry *
storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    MemObject *mem = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry(url, log_url);
    e->lock_count = 1;          /* Note lock here w/o calling storeLock() */
    mem = e->mem_obj;
    mem->method = method;

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    if (flags.cachable) {
        EBIT_SET(e->flags, ENTRY_CACHABLE);
        EBIT_CLR(e->flags, RELEASE_REQUEST);
    } else {
        /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
        e->releaseRequest();
    }

    e->store_status = STORE_PENDING;
    e->setMemStatus(NOT_IN_MEMORY);
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1;          /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}
843
844 /* Mark object as expired */
/* Force immediate expiration by setting the expiry time to "now". */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
851
/* STMCB callback fired when MemObject finishes storing a write: wake any
 * waiting store clients, unless delivery is deliberately being delayed. */
void
storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
{
    PROF_start(storeWriteComplete);
    StoreEntry *e = (StoreEntry *)data;

    if (EBIT_TEST(e->flags, DELAY_SENDING)) {
        PROF_stop(storeWriteComplete);
        return;
    }

    e->invokeHandlers();
    PROF_stop(storeWriteComplete);
}
866
/* Append writeBuffer to the in-memory object, first making memory room.
 * Only valid while the entry is still being filled (STORE_PENDING);
 * storeWriteComplete runs when the MemObject has absorbed the data. */
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write (writeBuffer, storeWriteComplete, this);
}
880
881 /* Append incoming data from a primary server to an entry. */
/* Append len raw bytes from buf at the current end of the object body
 * (offset excludes the stored HTTP reply headers, when present). */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
899
900
/* printf-style convenience wrapper around storeAppendVPrintf(). */
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);

    storeAppendVPrintf(e, fmt, args);
    va_end(args);
}
910
/* used by storeAppendPrintf and Packer */
/* Format into a 4KB scratch buffer and append the result to the entry.
 * NOTE: output longer than 4095 characters is silently truncated by
 * vsnprintf. */
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    buf[0] = '\0';
    vsnprintf(buf, 4096, fmt, vargs);
    e->append(buf, strlen(buf));
}
920
/* Counters recording, per reason, how often StoreEntry::checkCachable()
 * accepted or rejected an entry; reported by storeCheckCachableStats(). */
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
    } no;       /* rejection reasons */

    struct {
        int Default;
    } yes;      /* acceptances */
} store_check_cachable_hist;
939
/* Non-zero when the count of open disk files exceeds the configured
 * max_open_disk_fds limit; a limit of 0 disables the check. */
int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}
951
/* Non-zero if the object is below minObjectSize (special internal
 * entries are exempt). Completed objects are judged by actual size;
 * in-flight objects by the advertised Content-Length, if any. */
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz < 0 ||
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
967
968 // TODO: remove checks already performed by swapoutPossible()
969 // TODO: move "too many open..." checks outside -- we are called too early/late
970 int
971 StoreEntry::checkCachable()
972 {
973 #if CACHE_ALL_METHODS
974
975 if (mem_obj->method != METHOD_GET) {
976 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
977 ++store_check_cachable_hist.no.non_get;
978 } else
979 #endif
980 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
981 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
982 ++store_check_cachable_hist.no.wrong_content_length;
983 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
984 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
985 ++store_check_cachable_hist.no.not_entry_cachable;
986 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
987 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
988 ++store_check_cachable_hist.no.negative_cached;
989 return 0; /* avoid release call below */
990 } else if ((getReply()->content_length > 0 &&
991 getReply()->content_length
992 > Config.Store.maxObjectSize) ||
993 mem_obj->endOffset() > Config.Store.maxObjectSize) {
994 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
995 ++store_check_cachable_hist.no.too_big;
996 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
997 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
998 ++store_check_cachable_hist.no.too_big;
999 } else if (checkTooSmall()) {
1000 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1001 ++store_check_cachable_hist.no.too_small;
1002 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1003 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1004 ++store_check_cachable_hist.no.private_key;
1005 } else if (swap_status != SWAPOUT_NONE) {
1006 /*
1007 * here we checked the swap_status because the remaining
1008 * cases are only relevant only if we haven't started swapping
1009 * out the object yet.
1010 */
1011 return 1;
1012 } else if (storeTooManyDiskFilesOpen()) {
1013 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1014 ++store_check_cachable_hist.no.too_many_open_files;
1015 } else if (fdNFree() < RESERVED_FD) {
1016 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1017 ++store_check_cachable_hist.no.too_many_open_fds;
1018 } else {
1019 ++store_check_cachable_hist.yes.Default;
1020 return 1;
1021 }
1022
1023 releaseRequest();
1024 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1025 return 0;
1026 }
1027
/* Cache-manager report: dump the checkCachable() accept/reject counters
 * as a two-column table into sentry. */
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1058
/* Transition the entry from STORE_PENDING to STORE_OK: freeze the final
 * object size, validate the length, and wake waiting clients. Safe to
 * call on an aborted entry (it becomes a no-op). */
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!validLength()) {
        /* body size disagrees with Content-Length: keep it out of the cache */
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut.  However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1101
1102 /*
1103 * Someone wants to abort this transfer. Set the reason in the
1104 * request structure, call the server-side callback and mark the
1105 * entry for releasing
1106 */
/*
 * Someone wants to abort this transfer.  Set the reason in the
 * request structure, call the server-side callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock();                     /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    /* aborted entries are considered "complete" (see complete()) */
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20,1,HERE << "queueing event when abort.data is not valid");
        /* deliver asynchronously via the event queue */
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock();                   /* unlock */
}
1154
1155 /**
1156 * Clear Memory storage to accommodate the given object len
1157 */
/**
 * Clear Memory storage to accommodate the given object len.
 * Runs at most once per second; walks the memory replacement policy,
 * purging entries until enough pages are free (or the walk limit hits).
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;   /* throttles this to once per second */
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    /* round size up to whole pages */
    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count  << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released  << " were released");
    PROF_stop(storeGetMemSpace);
}
1202
1203
1204 /* thunk through to Store::Root().maintain(). Note that this would be better still
1205 * if registered against the root store itself, but that requires more complex
1206 * update logic - bigger fish to fry first. Long term each store when
1207 * it becomes active will self register
1208 */
1209 void
1210 Store::Maintain(void *notused)
1211 {
1212 Store::Root().maintain();
1213
1214 /* Reregister a maintain event .. */
1215 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1216
1217 }
1218
1219 /* The maximum objects to scan for maintain storage space */
1220 #define MAINTAIN_MAX_SCAN 1024
1221 #define MAINTAIN_MAX_REMOVE 64
1222
1223 /*
1224 * This routine is to be called by main loop in main.c.
1225 * It removes expired objects on only one bucket for each time called.
1226 *
1227 * This should get called 1/s from main().
1228 */
1229 void
1230 StoreController::maintain()
1231 {
1232 static time_t last_warn_time = 0;
1233
1234 PROF_start(storeMaintainSwapSpace);
1235 swapDir->maintain();
1236
1237 /* this should be emitted by the oversize dir, not globally */
1238
1239 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1240 if (squid_curtime - last_warn_time > 10) {
1241 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1242 << Store::Root().currentSize() / 1024.0 << " KB > "
1243 << (Store::Root().maxSize() >> 10) << " KB");
1244 last_warn_time = squid_curtime;
1245 }
1246 }
1247
1248 PROF_stop(storeMaintainSwapSpace);
1249 }
1250
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // While the disk index is being rebuilt we cannot safely unlink the
    // on-disk copy; park the entry on LateReleaseStack for
    // storeLateRelease() to finish after the rebuild completes.
    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        setPrivateKey();

        if (mem_obj)
            destroyMemObject();

        if (swap_filen > -1) {
            /*
             * Fake a call to StoreEntry->lock()  When rebuilding is done,
             * we'll just call StoreEntry->unlock() on these.
             */
            ++lock_count;
            setReleaseFlag();
            LateReleaseStack.push_back(this);
        } else {
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    setMemStatus(NOT_IN_MEMORY);
    destroyStoreEntry(static_cast<hash_link *>(this));
    // "this" is no longer valid
    PROF_stop(storeRelease);
}
1305
1306 static void
1307 storeLateRelease(void *unused)
1308 {
1309 StoreEntry *e;
1310 int i;
1311 static int n = 0;
1312
1313 if (StoreController::store_dirs_rebuilding) {
1314 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1315 return;
1316 }
1317
1318 for (i = 0; i < 10; ++i) {
1319 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1320
1321 if (e == NULL) {
1322 /* done! */
1323 debugs(20, 1, "storeLateRelease: released " << n << " objects");
1324 return;
1325 }
1326
1327 e->unlock();
1328 ++n;
1329 }
1330
1331 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1332 }
1333
1334 /* return 1 if a store entry is locked */
1335 int
1336 StoreEntry::locked() const
1337 {
1338 if (lock_count)
1339 return 1;
1340
1341 if (swap_status == SWAPOUT_WRITING)
1342 return 1;
1343
1344 if (store_status == STORE_PENDING)
1345 return 1;
1346
1347 /*
1348 * SPECIAL, PUBLIC entries should be "locked"
1349 */
1350 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1351 if (!EBIT_TEST(flags, KEY_PRIVATE))
1352 return 1;
1353
1354 return 0;
1355 }
1356
1357 bool
1358 StoreEntry::validLength() const
1359 {
1360 int64_t diff;
1361 const HttpReply *reply;
1362 assert(mem_obj != NULL);
1363 reply = getReply();
1364 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1365 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1366 objectLen());
1367 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1368 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1369
1370 if (reply->content_length < 0) {
1371 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1372 return 1;
1373 }
1374
1375 if (reply->hdr_sz == 0) {
1376 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1377 return 1;
1378 }
1379
1380 if (mem_obj->method == METHOD_HEAD) {
1381 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1382 return 1;
1383 }
1384
1385 if (reply->sline.status == HTTP_NOT_MODIFIED)
1386 return 1;
1387
1388 if (reply->sline.status == HTTP_NO_CONTENT)
1389 return 1;
1390
1391 diff = reply->hdr_sz + reply->content_length - objectLen();
1392
1393 if (diff == 0)
1394 return 1;
1395
1396 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1397
1398 return 0;
1399 }
1400
/// register the store-related cache manager reports: "storedir",
/// "store_io", and "store_check_cachable_stats"
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1409
/// One-time Store subsystem startup: key hashing, memory replacement
/// policy, digests, store log, the root Store, and the disk index rebuild.
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // start draining entries that release() defers during the rebuild
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1423
1424 void
1425 storeConfigure(void)
1426 {
1427 store_swap_high = (long) (((float) Store::Root().maxSize() *
1428 (float) Config.Swap.highWaterMark) / (float) 100);
1429 store_swap_low = (long) (((float) Store::Root().maxSize() *
1430 (float) Config.Swap.lowWaterMark) / (float) 100);
1431 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1432 }
1433
1434 bool
1435 StoreEntry::memoryCachable() const
1436 {
1437 if (mem_obj == NULL)
1438 return 0;
1439
1440 if (mem_obj->data_hdr.size() == 0)
1441 return 0;
1442
1443 if (mem_obj->inmem_lo != 0)
1444 return 0;
1445
1446 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1447 return 0;
1448
1449 return 1;
1450 }
1451
1452 int
1453 StoreEntry::checkNegativeHit() const
1454 {
1455 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1456 return 0;
1457
1458 if (expires <= squid_curtime)
1459 return 0;
1460
1461 if (store_status != STORE_OK)
1462 return 0;
1463
1464 return 1;
1465 }
1466
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    // Note: both branches below are the single statement of this `if`.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1486
1487 void
1488 storeFreeMemory(void)
1489 {
1490 Store::Root(NULL);
1491 #if USE_CACHE_DIGESTS
1492
1493 if (store_digest)
1494 cacheDigestDestroy(store_digest);
1495
1496 #endif
1497
1498 store_digest = NULL;
1499 }
1500
1501 int
1502 expiresMoreThan(time_t expires, time_t when)
1503 {
1504 if (expires < 0) /* No Expires given */
1505 return 1;
1506
1507 return (expires > (squid_curtime + when));
1508 }
1509
1510 int
1511 StoreEntry::validToSend() const
1512 {
1513 if (EBIT_TEST(flags, RELEASE_REQUEST))
1514 return 0;
1515
1516 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1517 if (expires <= squid_curtime)
1518 return 0;
1519
1520 if (EBIT_TEST(flags, ENTRY_ABORTED))
1521 return 0;
1522
1523 return 1;
1524 }
1525
/// Compute and set this entry's timestamp, expires, and lastmod fields
/// from the reply headers, mimicking RFC 2616 section 13.2.3.
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // keep the server's freshness lifetime (expires - date) but anchor it
    // at our possibly-corrected served_date
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1573
1574 void
1575 StoreEntry::registerAbort(STABH * cb, void *data)
1576 {
1577 assert(mem_obj);
1578 assert(mem_obj->abort.callback == NULL);
1579 mem_obj->abort.callback = cb;
1580 mem_obj->abort.data = cbdataReference(data);
1581 }
1582
1583 void
1584 StoreEntry::unregisterAbort()
1585 {
1586 assert(mem_obj);
1587 if (mem_obj->abort.callback) {
1588 mem_obj->abort.callback = NULL;
1589 cbdataReferenceDone(mem_obj->abort.data);
1590 }
1591 }
1592
/// Dump all StoreEntry fields to the debug log at level `l`.
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1614
1615 /*
1616 * NOTE, this function assumes only two mem states
1617 */
1618 void
1619 StoreEntry::setMemStatus(mem_status_t new_status)
1620 {
1621 if (new_status == mem_status)
1622 return;
1623
1624 // are we using a shared memory cache?
1625 if (Config.memShared && IamWorkerProcess()) {
1626 // enumerate calling cases if shared memory is enabled
1627 assert(new_status != IN_MEMORY || EBIT_TEST(flags, ENTRY_SPECIAL));
1628 // This method was designed to update replacement policy, not to
1629 // actually purge something from the memory cache (TODO: rename?).
1630 // Shared memory cache does not have a policy that needs updates.
1631 mem_status = new_status;
1632 return;
1633 }
1634
1635 assert(mem_obj != NULL);
1636
1637 if (new_status == IN_MEMORY) {
1638 assert(mem_obj->inmem_lo == 0);
1639
1640 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1641 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1642 } else {
1643 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1644 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1645 }
1646
1647 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1648 } else {
1649 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1650 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1651 } else {
1652 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1653 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1654 }
1655
1656 hot_obj_count--;
1657 }
1658
1659 mem_status = new_status;
1660 }
1661
/// the entry's stored URL, or a placeholder when it is unavailable
/// NOTE(review): comparing `this` to NULL is undefined behavior in
/// standard C++; it survives only because some callers historically
/// invoke url() through a NULL entry pointer -- confirm and fix callers
/// before removing the check.
const char *
StoreEntry::url() const
{
    if (this == NULL)
        return "[null_entry]";
    else if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->url;
}
1672
1673 void
1674 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1675 {
1676 if (mem_obj)
1677 return;
1678
1679 if (hidden_mem_obj) {
1680 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1681 mem_obj = hidden_mem_obj;
1682 hidden_mem_obj = NULL;
1683 mem_obj->resetUrls(aUrl, aLogUrl);
1684 return;
1685 }
1686
1687 mem_obj = new MemObject(aUrl, aLogUrl);
1688 }
1689
/* this just sets DELAY_SENDING */
/// Delay client-side delivery of appended data until flush() is called.
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1696
1697 /* this just clears DELAY_SENDING and Invokes the handlers */
1698 void
1699 StoreEntry::flush()
1700 {
1701 if (EBIT_TEST(flags, DELAY_SENDING)) {
1702 EBIT_CLR(flags, DELAY_SENDING);
1703 invokeHandlers();
1704 }
1705 }
1706
/// total stored object size (headers plus body) in bytes; requires mem_obj
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1713
/// body size in bytes: objectLen() minus the stored reply header size
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1721
1722 HttpReply const *
1723 StoreEntry::getReply () const
1724 {
1725 if (NULL == mem_obj)
1726 return NULL;
1727
1728 return mem_obj->getReply();
1729 }
1730
1731 void
1732 StoreEntry::reset()
1733 {
1734 assert (mem_obj);
1735 debugs(20, 3, "StoreEntry::reset: " << url());
1736 mem_obj->reset();
1737 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1738 rep->reset();
1739 expires = lastmod = timestamp = -1;
1740 }
1741
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}
1754
1755 /*
1756 * called to add another store removal policy module
1757 */
1758 void
1759 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1760 {
1761 int i;
1762
1763 /* find the number of currently known repl types */
1764 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1765 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1766 debugs(20, 1, "WARNING: Trying to load store replacement policy " << type << " twice.");
1767 return;
1768 }
1769 }
1770
1771 /* add the new type */
1772 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1773
1774 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1775
1776 storerepl_list[i].typestr = type;
1777
1778 storerepl_list[i].create = create;
1779 }
1780
1781 /*
1782 * Create a removal policy instance
1783 */
1784 RemovalPolicy *
1785 createRemovalPolicy(RemovalPolicySettings * settings)
1786 {
1787 storerepl_entry_t *r;
1788
1789 for (r = storerepl_list; r && r->typestr; ++r) {
1790 if (strcmp(r->typestr, settings->type) == 0)
1791 return r->create(settings->args);
1792 }
1793
1794 debugs(20, 1, "ERROR: Unknown policy " << settings->type);
1795 debugs(20, 1, "ERROR: Be sure to have set cache_replacement_policy");
1796 debugs(20, 1, "ERROR: and memory_replacement_policy in squid.conf!");
1797 fatalf("ERROR: Unknown policy %s\n", settings->type);
1798 return NULL; /* NOTREACHED */
1799 }
1800
#if 0
/* NOTE(review): dead code, compiled out via #if 0.  Presumably the old
 * swap file number bookkeeping path -- confirm it is unneeded before
 * deleting for good. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1821
1822
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    // a reply cannot be stored without an in-memory representation
    if (!mem_obj) {
        debugs(20, 0, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep);

    // optionally serialise the new headers into the store right away
    if (andStartWriting)
        startWriting();
}
1842
1843
/// Serialise the stored reply's headers (and any header-carried body
/// prefix) of a still-empty entry into the store via a Packer.
void
StoreEntry::startWriting()
{
    Packer p;

    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    packerToStoreInit(&p, this);

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    rep->packHeadersInto(&p);
    // record where the headers end so mem_obj can compute hdr_sz
    mem_obj->markEndOfReplyHeaders();

    rep->body.packInto(&p);

    packerClean(&p);
}
1867
1868
1869 char const *
1870 StoreEntry::getSerialisedMetaData()
1871 {
1872 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1873 int swap_hdr_sz;
1874 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1875 storeSwapTLVFree(tlv_list);
1876 assert (swap_hdr_sz >= 0);
1877 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1878 return result;
1879 }
1880
/// Release in-memory object data that is no longer needed, trimming
/// either swappable or unswappable portions depending on the flag.
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (!preserveSwappable) {
        // nothing below the policy's lowest-offset-to-keep means no trim
        if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
            /* Nothing to do */
            return;
        }
        /*
         * Its not swap-able, and we're about to delete a chunk,
         * so we must make it PRIVATE.  This is tricky/ugly because
         * for the most part, we treat swapable == cachable here.
         */
        releaseRequest();
        mem_obj->trimUnSwappable ();
    } else {
        mem_obj->trimSwappable ();
    }
}
1911
1912 bool
1913 StoreEntry::modifiedSince(HttpRequest * request) const
1914 {
1915 int object_length;
1916 time_t mod_time = lastmod;
1917
1918 if (mod_time < 0)
1919 mod_time = timestamp;
1920
1921 debugs(88, 3, "modifiedSince: '" << url() << "'");
1922
1923 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1924
1925 if (mod_time < 0)
1926 return true;
1927
1928 /* Find size of the object */
1929 object_length = getReply()->content_length;
1930
1931 if (object_length < 0)
1932 object_length = contentLen();
1933
1934 if (mod_time > request->ims) {
1935 debugs(88, 3, "--> YES: entry newer than client");
1936 return true;
1937 } else if (mod_time < request->ims) {
1938 debugs(88, 3, "--> NO: entry older than client");
1939 return false;
1940 } else if (request->imslen < 0) {
1941 debugs(88, 3, "--> NO: same LMT, no client length");
1942 return false;
1943 } else if (request->imslen == object_length) {
1944 debugs(88, 3, "--> NO: same LMT, same length");
1945 return false;
1946 } else {
1947 debugs(88, 3, "--> YES: same LMT, different length");
1948 return true;
1949 }
1950 }
1951
1952 bool
1953 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1954 {
1955 const String reqETags = request.header.getList(HDR_IF_MATCH);
1956 return hasOneOfEtags(reqETags, false);
1957 }
1958
1959 bool
1960 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1961 {
1962 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1963 // weak comparison is allowed only for HEAD or full-body GET requests
1964 const bool allowWeakMatch = !request.flags.range &&
1965 (request.method == METHOD_GET || request.method == METHOD_HEAD);
1966 return hasOneOfEtags(reqETags, allowWeakMatch);
1967 }
1968
1969 /// whether at least one of the request ETags matches entity ETag
1970 bool
1971 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1972 {
1973 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1974 if (!repETag.str)
1975 return strListIsMember(&reqETags, "*", ',');
1976
1977 bool matched = false;
1978 const char *pos = NULL;
1979 const char *item;
1980 int ilen;
1981 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1982 if (!strncmp(item, "*", ilen))
1983 matched = true;
1984 else {
1985 String str;
1986 str.append(item, ilen);
1987 ETag reqETag;
1988 if (etagParseInit(&reqETag, str.termedBuf())) {
1989 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1990 etagIsStrongEqual(repETag, reqETag);
1991 }
1992 }
1993 }
1994 return matched;
1995 }
1996
/// the SwapDir holding this entry; requires a valid, configured swap_dirn
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
2003
/// remove the entry's on-disk copy and reset all swap location state
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
2012
2013 /*
2014 * return true if the entry is in a state where
2015 * it can accept more data (ie with write() method)
2016 */
2017 bool
2018 StoreEntry::isAccepting() const
2019 {
2020 if (STORE_PENDING != store_status)
2021 return false;
2022
2023 if (EBIT_TEST(flags, ENTRY_ABORTED))
2024 return false;
2025
2026 return true;
2027 }
2028
/// compact debug rendering: swap_filen@swap_dirn=mem/ping/store/swap status
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
           e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
           e.swap_status;
}
2035
/* NullStoreEntry */

// the process-wide singleton instance
NullStoreEntry NullStoreEntry::_instance;

/// access the null-object StoreEntry singleton
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2045
/// the null entry has no cache key to print
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2051
/// the singleton must never be deleted; abort loudly if anyone tries
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2057
/// the null entry has no swap metadata to serialise
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}
2063
2064 #if !_USE_INLINE_
2065 #include "Store.cci"
2066 #endif