1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "CacheDigest.h"
36 #include "CacheManager.h"
37 #include "comm/Connection.h"
38 #include "ETag.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "globals.h"
42 #include "http.h"
43 #include "HttpReply.h"
44 #include "HttpRequest.h"
45 #include "mem_node.h"
46 #include "MemObject.h"
47 #include "mgr/Registration.h"
48 #include "mgr/StoreIoAction.h"
49 #include "profiler/Profiler.h"
50 #include "repl_modules.h"
51 #include "SquidTime.h"
52 #include "Stack.h"
53 #include "StatCounters.h"
54 #include "stmem.h"
55 #include "store_digest.h"
56 #include "store_key_md5.h"
58 #include "store_log.h"
59 #include "store_rebuild.h"
60 #include "Store.h"
61 #include "StoreClient.h"
62 #include "StoreIOState.h"
63 #include "StoreMeta.h"
64 #include "StrList.h"
65 #include "swap_log_op.h"
66 #include "SwapDir.h"
67 #include "tools.h"
68 #if USE_DELAY_POOLS
69 #include "DelayPools.h"
70 #endif
71 #if HAVE_LIMITS_H
72 #include <limits.h>
73 #endif
74
75 static STMCB storeWriteComplete;
76
77 #define REBUILD_TIMESTAMP_DELTA_MAX 2
78
79 #define STORE_IN_MEM_BUCKETS (229)
80
81 /** \todo Convert these string constants to generated enum string-arrays */
82
83 const char *memStatusStr[] = {
84 "NOT_IN_MEMORY",
85 "IN_MEMORY"
86 };
87
88 const char *pingStatusStr[] = {
89 "PING_NONE",
90 "PING_WAITING",
91 "PING_DONE"
92 };
93
94 const char *storeStatusStr[] = {
95 "STORE_OK",
96 "STORE_PENDING"
97 };
98
99 const char *swapStatusStr[] = {
100 "SWAPOUT_NONE",
101 "SWAPOUT_WRITING",
102 "SWAPOUT_DONE"
103 };
104
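/*
 * Illustrative note: these tables are assumed to be indexed by the
 * corresponding status enums when producing human-readable output, e.g.
 * (sketch only, not a call made in this file):
 *
 *   debugs(20, 3, "entry: " << memStatusStr[e->mem_status] << '/'
 *          << storeStatusStr[e->store_status]);
 */
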
105 /*
106 * This defines a repl type
107 */
108
109 typedef struct _storerepl_entry storerepl_entry_t;
110
111 struct _storerepl_entry {
112 const char *typestr;
113 REMOVALPOLICYCREATE *create;
114 };
115
116 static storerepl_entry_t *storerepl_list = NULL;
117
118 /*
119 * local function prototypes
120 */
121 static int getKeyCounter(void);
122 static OBJH storeCheckCachableStats;
123 static EVH storeLateRelease;
124
125 /*
126 * local variables
127 */
128 static Stack<StoreEntry*> LateReleaseStack;
129 MemAllocator *StoreEntry::pool = NULL;
130
131 StorePointer Store::CurrentRoot = NULL;
132
133 void
134 Store::Root(Store * aRoot)
135 {
136 CurrentRoot = aRoot;
137 }
138
139 void
140 Store::Root(StorePointer aRoot)
141 {
142 Root(aRoot.getRaw());
143 }
144
145 void
146 Store::Stats(StoreEntry * output)
147 {
148 assert (output);
149 Root().stat(*output);
150 }
151
152 void
153 Store::create()
154 {}
155
156 void
157 Store::diskFull()
158 {}
159
160 void
161 Store::sync()
162 {}
163
164 void
165 Store::unlink (StoreEntry &anEntry)
166 {
167 fatal("Store::unlink on invalid Store\n");
168 }
169
170 void *
171 StoreEntry::operator new (size_t bytecount)
172 {
173 assert (bytecount == sizeof (StoreEntry));
174
175 if (!pool) {
176 pool = memPoolCreate ("StoreEntry", bytecount);
177 pool->setChunkSize(2048 * 1024);
178 }
179
180 return pool->alloc();
181 }
182
183 void
184 StoreEntry::operator delete (void *address)
185 {
186 pool->freeOne(address);
187 }
188
189 void
190 StoreEntry::makePublic()
191 {
192 /* This object can be cached for a long time */
193
194 if (EBIT_TEST(flags, ENTRY_CACHABLE))
195 setPublicKey();
196 }
197
198 void
199 StoreEntry::makePrivate()
200 {
201 /* This object should never be cached at all */
202 expireNow();
203 releaseRequest(); /* delete object when not used */
204 /* releaseRequest clears ENTRY_CACHABLE flag */
205 }
206
207 void
208 StoreEntry::cacheNegatively()
209 {
210 /* This object may be negatively cached */
211 negativeCache();
212
213 if (EBIT_TEST(flags, ENTRY_CACHABLE))
214 setPublicKey();
215 }
216
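/*
 * Usage sketch for the three calls above (assumption about typical
 * server-side callers; "cachable"/"negatively_cachable" are placeholder
 * conditions): once the reply headers have been evaluated, exactly one
 * of them is chosen:
 *
 *   if (cachable)
 *       entry->makePublic();
 *   else if (negatively_cachable)
 *       entry->cacheNegatively();
 *   else
 *       entry->makePrivate();
 */
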
217 size_t
218 StoreEntry::inUseCount()
219 {
220 if (!pool)
221 return 0;
222 return pool->getInUseCount();
223 }
224
225 const char *
226 StoreEntry::getMD5Text() const
227 {
228 return storeKeyText((const cache_key *)key);
229 }
230
231 #include "comm.h"
232
233 void
234 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
235 {
236 StoreEntry *anEntry = (StoreEntry *)theContext;
237 anEntry->delayAwareRead(aRead.conn,
238 aRead.buf,
239 aRead.len,
240 aRead.callback);
241 }
242
243 void
244 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
245 {
246 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
247 /* sketch: readdeferer* = getdeferer.
248 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
249 */
250
251 if (amountToRead == 0) {
252 assert (mem_obj);
253 /* read ahead limit */
254 /* Perhaps these two calls should both live in MemObject */
255 #if USE_DELAY_POOLS
256 if (!mem_obj->readAheadPolicyCanRead()) {
257 #endif
258 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
259 return;
260 #if USE_DELAY_POOLS
261 }
262
263 /* delay id limit */
264 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
265 return;
266
267 #endif
268
269 }
270
271 if (fd_table[conn->fd].closing()) {
272 // Readers must have closing callbacks if they want to be notified. No
273 // readers appeared to care around 2009/12/14 as they skipped reading
274 // for other reasons. Closing may already be true at the delayAwareRead
275 // call time or may happen while we wait after delayRead() above.
276 debugs(20, 3, HERE << "won't read from closing " << conn << " for " <<
277 callback);
278 return; // the read callback will never be called
279 }
280
281 comm_read(conn, buf, amountToRead, callback);
282 }
283
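/*
 * Caller sketch (hypothetical names; server-side readers are assumed to
 * route their comm reads through the entry so delay pools and the
 * read-ahead limit above are honoured):
 *
 *   AsyncCall::Pointer call = ...; // the comm read callback
 *   entry->delayAwareRead(serverConnection, readBuf, readSize, call);
 */
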
284 size_t
285 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
286 {
287 if (mem_obj == NULL)
288 return aRange.end;
289
290 #if URL_CHECKSUM_DEBUG
291
292 mem_obj->checkUrlChecksum();
293
294 #endif
295
296 if (!mem_obj->readAheadPolicyCanRead())
297 return 0;
298
299 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
300 }
301
302 bool
303 StoreEntry::checkDeferRead(int fd) const
304 {
305 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
306 }
307
308 void
309 StoreEntry::setNoDelay (bool const newValue)
310 {
311 if (mem_obj)
312 mem_obj->setNoDelay(newValue);
313 }
314
315 store_client_t
316 StoreEntry::storeClientType() const
317 {
318 /* The needed offset isn't in memory
319 * XXX TODO: this is wrong for range requests
320 * as the needed offset may *not* be 0, AND
321 * offset 0 in the memory object is the HTTP headers.
322 */
323
324 if (mem_status == IN_MEMORY && Config.memShared && IamWorkerProcess()) {
325 // clients of an object cached in shared memory are memory clients
326 return STORE_MEM_CLIENT;
327 }
328
329 assert(mem_obj);
330
331 if (mem_obj->inmem_lo)
332 return STORE_DISK_CLIENT;
333
334 if (EBIT_TEST(flags, ENTRY_ABORTED)) {
335 /* I don't think we should be adding clients to aborted entries */
336 debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
337 return STORE_MEM_CLIENT;
338 }
339
340 if (store_status == STORE_OK) {
341 /* the object has completed. */
342
343 if (mem_obj->inmem_lo == 0 && !isEmpty()) {
344 if (swap_status == SWAPOUT_DONE) {
345 debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
346 if (mem_obj->endOffset() == mem_obj->object_sz) {
347 /* hot object fully swapped in */
348 return STORE_MEM_CLIENT;
349 }
350 } else {
351 /* Memory-only, or currently being swapped out */
352 return STORE_MEM_CLIENT;
353 }
354 }
355 return STORE_DISK_CLIENT;
356 }
357
358 /* here and past, entry is STORE_PENDING */
359 /*
360 * If this is the first client, let it be the mem client
361 */
362 if (mem_obj->nclients == 1)
363 return STORE_MEM_CLIENT;
364
365 /*
366 * If there is no disk file to open yet, we must make this a
367 * mem client. If we can't open the swapin file before writing
368 * to the client, there is no guarantee that we will be able
369 * to open it later when we really need it.
370 */
371 if (swap_status == SWAPOUT_NONE)
372 return STORE_MEM_CLIENT;
373
374 /*
375 * otherwise, make subsequent clients read from disk so they
376 * can not delay the first, and vice-versa.
377 */
378 return STORE_DISK_CLIENT;
379 }
380
381 StoreEntry::StoreEntry():
382 hidden_mem_obj(NULL),
383 swap_file_sz(0)
384 {
385 debugs(20, 3, HERE << "new StoreEntry " << this);
386 mem_obj = NULL;
387
388 expires = lastmod = lastref = timestamp = -1;
389
390 swap_status = SWAPOUT_NONE;
391 swap_filen = -1;
392 swap_dirn = -1;
393 }
394
395 StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
396 hidden_mem_obj(NULL),
397 swap_file_sz(0)
398 {
399 debugs(20, 3, HERE << "new StoreEntry " << this);
400 mem_obj = new MemObject(aUrl, aLogUrl);
401
402 expires = lastmod = lastref = timestamp = -1;
403
404 swap_status = SWAPOUT_NONE;
405 swap_filen = -1;
406 swap_dirn = -1;
407 }
408
409 StoreEntry::~StoreEntry()
410 {
411 if (swap_filen >= 0) {
412 SwapDir &sd = dynamic_cast<SwapDir&>(*store());
413 sd.disconnect(*this);
414 }
415 delete hidden_mem_obj;
416 }
417
418 #if USE_ADAPTATION
419 void
420 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
421 {
422 if (!deferredProducer)
423 deferredProducer = producer;
424 else
425 debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
426 *deferredProducer << ", requested call: " << *producer);
427 }
428
429 void
430 StoreEntry::kickProducer()
431 {
432 if (deferredProducer != NULL) {
433 ScheduleCallHere(deferredProducer);
434 deferredProducer = NULL;
435 }
436 }
437 #endif
438
439 void
440 StoreEntry::destroyMemObject()
441 {
442 debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
443 setMemStatus(NOT_IN_MEMORY);
444 MemObject *mem = mem_obj;
445 mem_obj = NULL;
446 delete mem;
447 delete hidden_mem_obj;
448 hidden_mem_obj = NULL;
449 }
450
451 void
452 StoreEntry::hideMemObject()
453 {
454 debugs(20, 3, HERE << "hiding " << mem_obj);
455 assert(mem_obj);
456 assert(!hidden_mem_obj);
457 hidden_mem_obj = mem_obj;
458 mem_obj = NULL;
459 }
460
461 void
462 destroyStoreEntry(void *data)
463 {
464 debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
465 StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
466 assert(e != NULL);
467
468 if (e == NullStoreEntry::getInstance())
469 return;
470
471 e->destroyMemObject();
472
473 e->hashDelete();
474
475 assert(e->key == NULL);
476
477 delete e;
478 }
479
480 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
481
482 void
483 StoreEntry::hashInsert(const cache_key * someKey)
484 {
485 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
486 key = storeKeyDup(someKey);
487 hash_join(store_table, this);
488 }
489
490 void
491 StoreEntry::hashDelete()
492 {
493 hash_remove_link(store_table, this);
494 storeKeyFree((const cache_key *)key);
495 key = NULL;
496 }
497
498 /* -------------------------------------------------------------------------- */
499
500 /* get rid of memory copy of the object */
501 void
502 StoreEntry::purgeMem()
503 {
504 if (mem_obj == NULL)
505 return;
506
507 debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());
508
509 destroyMemObject();
510
511 if (swap_status != SWAPOUT_DONE)
512 release();
513 }
514
515 /* RBC 20050104 this is wrong- memory ref counting
516 * is not at all equivalent to the store 'usage' concept
517 * which the replacement policies should be acting upon.
518 * specifically, object iteration within stores needs
519 * memory ref counting to prevent race conditions,
520 * but this should not influence store replacement.
521 */
522 void
524 StoreEntry::lock()
525 {
526 ++lock_count;
527 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
528 lock_count );
529 lastref = squid_curtime;
530 Store::Root().reference(*this);
531 }
532
533 void
534 StoreEntry::setReleaseFlag()
535 {
536 if (EBIT_TEST(flags, RELEASE_REQUEST))
537 return;
538
539 debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");
540
541 EBIT_SET(flags, RELEASE_REQUEST);
542 }
543
544 void
545 StoreEntry::releaseRequest()
546 {
547 if (EBIT_TEST(flags, RELEASE_REQUEST))
548 return;
549
550 setReleaseFlag();
551
552 /*
553 * Clear cachable flag here because we might get called before
554 * anyone else even looks at the cachability flag. Also, this
555 * prevents httpMakePublic from really setting a public key.
556 */
557 EBIT_CLR(flags, ENTRY_CACHABLE);
558
559 setPrivateKey();
560 }
561
562 /* unlock object, return 0 if the object gets released after unlock,
563  * otherwise the remaining lock_count */
564 int
565 StoreEntry::unlock()
566 {
567 --lock_count;
568 debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);
569
570 if (lock_count)
571 return (int) lock_count;
572
573 if (store_status == STORE_PENDING)
574 setReleaseFlag();
575
576 assert(storePendingNClients(this) == 0);
577
578 if (EBIT_TEST(flags, RELEASE_REQUEST)) {
579 this->release();
580 return 0;
581 }
582
583 if (EBIT_TEST(flags, KEY_PRIVATE))
584 debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");
585
586 Store::Root().handleIdleEntry(*this); // may delete us
587 return 0;
588 }
589
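/*
 * Lifecycle sketch based on the functions above: entries begin life
 * locked (storeCreateEntry() sets lock_count = 1 directly) and every
 * lock() must be balanced by an unlock(), which may hand the idle entry
 * to the store or release it:
 *
 *   e->lock();
 *   ... use e ...
 *   e->unlock(); // e may no longer exist after this call
 */
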
590 void
591 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
592 {
593 assert (aClient);
594 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
595
596 if (!result)
597 aClient->created (NullStoreEntry::getInstance());
598 else
599 aClient->created (result);
600 }
601
602 void
603 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
604 {
605 assert (aClient);
606 StoreEntry *result = storeGetPublicByRequest (request);
607
608 if (!result)
609 result = NullStoreEntry::getInstance();
610
611 aClient->created (result);
612 }
613
614 void
615 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
616 {
617 assert (aClient);
618 StoreEntry *result = storeGetPublic (uri, method);
619
620 if (!result)
621 result = NullStoreEntry::getInstance();
622
623 aClient->created (result);
624 }
625
626 StoreEntry *
627 storeGetPublic(const char *uri, const HttpRequestMethod& method)
628 {
629 return Store::Root().get(storeKeyPublic(uri, method));
630 }
631
632 StoreEntry *
633 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
634 {
635 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
636 }
637
638 StoreEntry *
639 storeGetPublicByRequest(HttpRequest * req)
640 {
641 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
642
643 if (e == NULL && req->method == METHOD_HEAD)
644 /* We can generate a HEAD reply from a cached GET object */
645 e = storeGetPublicByRequestMethod(req, METHOD_GET);
646
647 return e;
648 }
649
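/*
 * Lookup sketch using the helpers above (uri is a placeholder;
 * METHOD_GET as an example method). As noted in setPublicKey(), these
 * lookups may return unlocked entries:
 *
 *   if (StoreEntry *hit = storeGetPublic(uri, METHOD_GET))
 *       ... cache hit ...
 */
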
650 static int
651 getKeyCounter(void)
652 {
653 static int key_counter = 0;
654
655 if (++key_counter < 0)
656 key_counter = 1;
657
658 return key_counter;
659 }
660
661 /* RBC 20050104 AFAICT this should become simpler:
662 * rather than reinserting with a special key it should be marked
663 * as 'released' and then cleaned up when refcounting indicates.
664 * the StoreHashIndex could well implement its 'released' in the
665 * current manner.
666 * Also, clean log writing should skip over ia,t
667 * Otherwise, we need a 'remove from the index but not the store'
668 * concept.
669 */
670 void
671 StoreEntry::setPrivateKey()
672 {
673 const cache_key *newkey;
674
675 if (key && EBIT_TEST(flags, KEY_PRIVATE))
676 return; /* is already private */
677
678 if (key) {
679 if (swap_filen > -1)
680 storeDirSwapLog(this, SWAP_LOG_DEL);
681
682 hashDelete();
683 }
684
685 if (mem_obj != NULL) {
686 mem_obj->id = getKeyCounter();
687 newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
688 } else {
689 newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
690 }
691
692 assert(hash_lookup(store_table, newkey) == NULL);
693 EBIT_SET(flags, KEY_PRIVATE);
694 hashInsert(newkey);
695 }
696
697 void
698 StoreEntry::setPublicKey()
699 {
700 StoreEntry *e2 = NULL;
701 const cache_key *newkey;
702
703 if (key && !EBIT_TEST(flags, KEY_PRIVATE))
704 return; /* is already public */
705
706 assert(mem_obj);
707
708 /*
709 * We can't make RELEASE_REQUEST objects public. Depending on
710 * when RELEASE_REQUEST gets set, we might not be swapping out
711 * the object. If we're not swapping out, then subsequent
712 * store clients won't be able to access object data which has
713 * been freed from memory.
714 *
715 * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
716 * be set, and StoreEntry::setPublicKey() should not be called.
717 */
718 #if MORE_DEBUG_OUTPUT
719
720 if (EBIT_TEST(flags, RELEASE_REQUEST))
721 debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);
722
723 #endif
724
725 assert(!EBIT_TEST(flags, RELEASE_REQUEST));
726
727 if (mem_obj->request) {
728 HttpRequest *request = mem_obj->request;
729
730 if (!mem_obj->vary_headers) {
731 /* First handle the case where the object no longer varies */
732 safe_free(request->vary_headers);
733 } else {
734 if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
735 /* Oops.. the variance has changed. Kill the base object
736 * to record the new variance key
737 */
738 safe_free(request->vary_headers); /* free old "bad" variance key */
739 StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);
740
741 if (pe)
742 pe->release();
743 }
744
745 /* Make sure the request knows the variance status */
746 if (!request->vary_headers) {
747 const char *vary = httpMakeVaryMark(request, mem_obj->getReply());
748
749 if (vary)
750 request->vary_headers = xstrdup(vary);
751 }
752 }
753
754 // TODO: storeGetPublic() calls below may create unlocked entries.
755 // We should add/use storeHas() API or lock/unlock those entries.
756 if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
757 /* Create "vary" base object */
758 String vary;
759 StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
760 /* We are allowed to do this typecast */
761 HttpReply *rep = new HttpReply;
762 rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
763 vary = mem_obj->getReply()->header.getList(HDR_VARY);
764
765 if (vary.size()) {
766 /* Again, we own this structure layout */
767 rep->header.putStr(HDR_VARY, vary.termedBuf());
768 vary.clean();
769 }
770
771 #if X_ACCELERATOR_VARY
772 vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);
773
774 if (vary.defined()) {
775 /* Again, we own this structure layout */
776 rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
777 vary.clean();
778 }
779
780 #endif
781 pe->replaceHttpReply(rep);
782
783 pe->timestampsSet();
784
785 pe->makePublic();
786
787 pe->complete();
788
789 pe->unlock();
790 }
791
792 newkey = storeKeyPublicByRequest(mem_obj->request);
793 } else
794 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
795
796 if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
797 debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
798 e2->setPrivateKey();
799 e2->release();
800
801 if (mem_obj->request)
802 newkey = storeKeyPublicByRequest(mem_obj->request);
803 else
804 newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
805 }
806
807 if (key)
808 hashDelete();
809
810 EBIT_CLR(flags, KEY_PRIVATE);
811
812 hashInsert(newkey);
813
814 if (swap_filen > -1)
815 storeDirSwapLog(this, SWAP_LOG_ADD);
816 }
817
818 StoreEntry *
819 storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
820 {
821 StoreEntry *e = NULL;
822 MemObject *mem = NULL;
823 debugs(20, 3, "storeCreateEntry: '" << url << "'");
824
825 e = new StoreEntry(url, log_url);
826 e->lock_count = 1; /* Note lock here w/o calling storeLock() */
827 mem = e->mem_obj;
828 mem->method = method;
829
830 if (neighbors_do_private_keys || !flags.hierarchical)
831 e->setPrivateKey();
832 else
833 e->setPublicKey();
834
835 if (flags.cachable) {
836 EBIT_SET(e->flags, ENTRY_CACHABLE);
837 EBIT_CLR(e->flags, RELEASE_REQUEST);
838 } else {
839 /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
840 e->releaseRequest();
841 }
842
843 e->store_status = STORE_PENDING;
844 e->setMemStatus(NOT_IN_MEMORY);
845 e->refcount = 0;
846 e->lastref = squid_curtime;
847 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
848 e->ping_status = PING_NONE;
849 EBIT_SET(e->flags, ENTRY_VALIDATED);
850 return e;
851 }
852
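/*
 * Producer sketch assembled from calls in this file (url, log_url,
 * request, reply, body and body_len are placeholders):
 *
 *   StoreEntry *e = storeCreateEntry(url, log_url, request->flags, request->method);
 *   e->replaceHttpReply(reply, true); // packs and starts writing the headers
 *   e->append(body, body_len);
 *   e->complete();
 *   e->unlock(); // drop the creator's implicit lock
 */
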
853 /* Mark object as expired */
854 void
855 StoreEntry::expireNow()
856 {
857 debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
858 expires = squid_curtime;
859 }
860
861 void
862 storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
863 {
864 PROF_start(storeWriteComplete);
865 StoreEntry *e = (StoreEntry *)data;
866
867 if (EBIT_TEST(e->flags, DELAY_SENDING)) {
868 PROF_stop(storeWriteComplete);
869 return;
870 }
871
872 e->invokeHandlers();
873 PROF_stop(storeWriteComplete);
874 }
875
876 void
877 StoreEntry::write (StoreIOBuffer writeBuffer)
878 {
879 assert(mem_obj != NULL);
880 /* This assert will change when we teach the store to update */
881 PROF_start(StoreEntry_write);
882 assert(store_status == STORE_PENDING);
883
884 debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
885 PROF_stop(StoreEntry_write);
886 storeGetMemSpace(writeBuffer.length);
887 mem_obj->write (writeBuffer, storeWriteComplete, this);
888 }
889
890 /* Append incoming data from a primary server to an entry. */
891 void
892 StoreEntry::append(char const *buf, int len)
893 {
894 assert(mem_obj != NULL);
895 assert(len >= 0);
896 assert(store_status == STORE_PENDING);
897
898 StoreIOBuffer tempBuffer;
899 tempBuffer.data = (char *)buf;
900 tempBuffer.length = len;
901 /*
902 * XXX sigh, offset might be < 0 here, but it gets "corrected"
903 * later. This offset crap is such a mess.
904 */
905 tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
906 write(tempBuffer);
907 }
908
909 void
910 storeAppendPrintf(StoreEntry * e, const char *fmt,...)
911 {
912 va_list args;
913 va_start(args, fmt);
914
915 storeAppendVPrintf(e, fmt, args);
916 va_end(args);
917 }
918
919 /* used by storeAppendPrintf and Packer */
920 void
921 storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
922 {
923 LOCAL_ARRAY(char, buf, 4096);
924 buf[0] = '\0';
925 vsnprintf(buf, 4096, fmt, vargs);
926 e->append(buf, strlen(buf));
927 }
928
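/*
 * Reporting sketch: cache manager handlers (see storeCheckCachableStats()
 * below) build their output with storeAppendPrintf(), e.g. a hypothetical
 * OBJH:
 *
 *   static void
 *   myStatsHandler(StoreEntry *sentry)
 *   {
 *       storeAppendPrintf(sentry, "some_counter\t%d\n", some_counter);
 *   }
 */
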
929 struct _store_check_cachable_hist {
930
931 struct {
932 int non_get;
933 int not_entry_cachable;
934 int wrong_content_length;
935 int negative_cached;
936 int too_big;
937 int too_small;
938 int private_key;
939 int too_many_open_files;
940 int too_many_open_fds;
941 } no;
942
943 struct {
944 int Default;
945 } yes;
946 } store_check_cachable_hist;
947
948 int
949 storeTooManyDiskFilesOpen(void)
950 {
951 if (Config.max_open_disk_fds == 0)
952 return 0;
953
954 if (store_open_disk_fd > Config.max_open_disk_fds)
955 return 1;
956
957 return 0;
958 }
959
960 int
961 StoreEntry::checkTooSmall()
962 {
963 if (EBIT_TEST(flags, ENTRY_SPECIAL))
964 return 0;
965
966 if (STORE_OK == store_status)
967 if (mem_obj->object_sz < 0 ||
968 mem_obj->object_sz < Config.Store.minObjectSize)
969 return 1;
970 if (getReply()->content_length > -1)
971 if (getReply()->content_length < Config.Store.minObjectSize)
972 return 1;
973 return 0;
974 }
975
976 // TODO: remove checks already performed by swapoutPossible()
977 // TODO: move "too many open..." checks outside -- we are called too early/late
978 int
979 StoreEntry::checkCachable()
980 {
981 #if CACHE_ALL_METHODS
982
983 if (mem_obj->method != METHOD_GET) {
984 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
985 ++store_check_cachable_hist.no.non_get;
986 } else
987 #endif
988 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
989 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
990 ++store_check_cachable_hist.no.wrong_content_length;
991 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
992 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
993 ++store_check_cachable_hist.no.not_entry_cachable;
994 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
995 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
996 ++store_check_cachable_hist.no.negative_cached;
997 return 0; /* avoid release call below */
998 } else if ((getReply()->content_length > 0 &&
999 getReply()->content_length
1000 > Config.Store.maxObjectSize) ||
1001 mem_obj->endOffset() > Config.Store.maxObjectSize) {
1002 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1003 ++store_check_cachable_hist.no.too_big;
1004 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
1005 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
1006 ++store_check_cachable_hist.no.too_big;
1007 } else if (checkTooSmall()) {
1008 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
1009 ++store_check_cachable_hist.no.too_small;
1010 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1011 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1012 ++store_check_cachable_hist.no.private_key;
1013 } else if (swap_status != SWAPOUT_NONE) {
1014 /*
1015 * here we check the swap_status because the remaining
1016 * cases are relevant only if we haven't started swapping
1017 * out the object yet.
1018 */
1019 return 1;
1020 } else if (storeTooManyDiskFilesOpen()) {
1021 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1022 ++store_check_cachable_hist.no.too_many_open_files;
1023 } else if (fdNFree() < RESERVED_FD) {
1024 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1025 ++store_check_cachable_hist.no.too_many_open_fds;
1026 } else {
1027 ++store_check_cachable_hist.yes.Default;
1028 return 1;
1029 }
1030
1031 releaseRequest();
1032 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1033 return 0;
1034 }
1035
1036 void
1037 storeCheckCachableStats(StoreEntry *sentry)
1038 {
1039 storeAppendPrintf(sentry, "Category\t Count\n");
1040
1041 #if CACHE_ALL_METHODS
1042
1043 storeAppendPrintf(sentry, "no.non_get\t%d\n",
1044 store_check_cachable_hist.no.non_get);
1045 #endif
1046
1047 storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1048 store_check_cachable_hist.no.not_entry_cachable);
1049 storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1050 store_check_cachable_hist.no.wrong_content_length);
1051 storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1052 store_check_cachable_hist.no.negative_cached);
1053 storeAppendPrintf(sentry, "no.too_big\t%d\n",
1054 store_check_cachable_hist.no.too_big);
1055 storeAppendPrintf(sentry, "no.too_small\t%d\n",
1056 store_check_cachable_hist.no.too_small);
1057 storeAppendPrintf(sentry, "no.private_key\t%d\n",
1058 store_check_cachable_hist.no.private_key);
1059 storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1060 store_check_cachable_hist.no.too_many_open_files);
1061 storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1062 store_check_cachable_hist.no.too_many_open_fds);
1063 storeAppendPrintf(sentry, "yes.default\t%d\n",
1064 store_check_cachable_hist.yes.Default);
1065 }
1066
1067 void
1068 StoreEntry::complete()
1069 {
1070 debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");
1071
1072 if (store_status != STORE_PENDING) {
1073 /*
1074 * if we're not STORE_PENDING, then probably we got aborted
1075 * and there should be NO clients on this entry
1076 */
1077 assert(EBIT_TEST(flags, ENTRY_ABORTED));
1078 assert(mem_obj->nclients == 0);
1079 return;
1080 }
1081
1082 /* This is suspect: mem obj offsets include the headers. Do we adjust for that
1083 * in use of object_sz?
1084 */
1085 mem_obj->object_sz = mem_obj->endOffset();
1086
1087 store_status = STORE_OK;
1088
1089 assert(mem_status == NOT_IN_MEMORY);
1090
1091 if (!validLength()) {
1092 EBIT_SET(flags, ENTRY_BAD_LENGTH);
1093 releaseRequest();
1094 }
1095
1096 #if USE_CACHE_DIGESTS
1097 if (mem_obj->request)
1098 mem_obj->request->hier.store_complete_stop = current_time;
1099
1100 #endif
1101 /*
1102 * We used to call invokeHandlers, then storeSwapOut. However,
1103 * Madhukar Reddy <myreddy@persistence.com> reported that
1104 * responses without content length would sometimes get released
1105 * in client_side, thinking that the response is incomplete.
1106 */
1107 invokeHandlers();
1108 }
1109
1110 /*
1111 * Someone wants to abort this transfer. Set the reason in the
1112 * request structure, call the server-side callback and mark the
1113 * entry for releasing
1114 */
1115 void
1116 StoreEntry::abort()
1117 {
1118 ++statCounter.aborted_requests;
1119 assert(store_status == STORE_PENDING);
1120 assert(mem_obj != NULL);
1121 debugs(20, 6, "storeAbort: " << getMD5Text());
1122
1123 lock(); /* lock while aborting */
1124 negativeCache();
1125
1126 releaseRequest();
1127
1128 EBIT_SET(flags, ENTRY_ABORTED);
1129
1130 setMemStatus(NOT_IN_MEMORY);
1131
1132 store_status = STORE_OK;
1133
1134 /* Notify the server side */
1135
1136 /*
1137 * DPW 2007-05-07
1138 * Should we check abort.data for validity?
1139 */
1140 if (mem_obj->abort.callback) {
1141 if (!cbdataReferenceValid(mem_obj->abort.data))
1142 debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
1143 eventAdd("mem_obj->abort.callback",
1144 mem_obj->abort.callback,
1145 mem_obj->abort.data,
1146 0.0,
1147 true);
1148 unregisterAbort();
1149 }
1150
1151 /* XXX Should we reverse these two, so that there is no
1152 * unneeded disk swapping triggered?
1153 */
1154 /* Notify the client side */
1155 invokeHandlers();
1156
1157 // abort swap out, invalidating what was created so far (release follows)
1158 swapOutFileClose(StoreIOState::writerGone);
1159
1160 unlock(); /* unlock */
1161 }
1162
1163 /**
1164 * Clear memory storage to accommodate the given object length
1165 */
1166 void
1167 storeGetMemSpace(int size)
1168 {
1169 PROF_start(storeGetMemSpace);
1170 StoreEntry *e = NULL;
1171 int released = 0;
1172 static time_t last_check = 0;
1173 size_t pages_needed;
1174 RemovalPurgeWalker *walker;
1175
1176 if (squid_curtime == last_check) {
1177 PROF_stop(storeGetMemSpace);
1178 return;
1179 }
1180
1181 last_check = squid_curtime;
1182
1183 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1184
1185 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1186 PROF_stop(storeGetMemSpace);
1187 return;
1188 }
1189
1190 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1191 " pages");
1192
1193 /* XXX what to set as max_scan here? */
1194 walker = mem_policy->PurgeInit(mem_policy, 100000);
1195
1196 while ((e = walker->Next(walker))) {
1197 e->purgeMem();
1198 ++released;
1199
1200 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1201 break;
1202 }
1203
1204 walker->Done(walker);
1205 debugs(20, 3, "storeGetMemSpace stats:");
1206 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1207 debugs(20, 3, " " << std::setw(6) << released << " were released");
1208 PROF_stop(storeGetMemSpace);
1209 }
1210
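/*
 * Worked example for the pages_needed calculation above (assuming the
 * usual SM_PAGE_SIZE of 4096 bytes): a 10000-byte write rounds up to
 * (10000 + 4095) / 4096 = 3 pages before being compared against
 * store_pages_max.
 */
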
1211 /* thunk through to Store::Root().maintain(). Note that this would be better still
1212 * if registered against the root store itself, but that requires more complex
1213 * update logic - bigger fish to fry first. Long term, each store will
1214 * self-register when it becomes active.
1215 */
1216 void
1217 Store::Maintain(void *notused)
1218 {
1219 Store::Root().maintain();
1220
1221 /* Reregister a maintain event .. */
1222 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1223
1224 }
1225
1226 /* The maximum number of objects to scan when maintaining storage space */
1227 #define MAINTAIN_MAX_SCAN 1024
1228 #define MAINTAIN_MAX_REMOVE 64
1229
1230 /*
1231 * This routine is to be called by the main loop in main.c.
1232 * It removes expired objects from only one bucket each time it is called.
1233 *
1234 * This should get called 1/s from main().
1235 */
1236 void
1237 StoreController::maintain()
1238 {
1239 static time_t last_warn_time = 0;
1240
1241 PROF_start(storeMaintainSwapSpace);
1242 swapDir->maintain();
1243
1244 /* this should be emitted by the oversize dir, not globally */
1245
1246 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1247 if (squid_curtime - last_warn_time > 10) {
1248 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1249 << Store::Root().currentSize() / 1024.0 << " KB > "
1250 << (Store::Root().maxSize() >> 10) << " KB");
1251 last_warn_time = squid_curtime;
1252 }
1253 }
1254
1255 PROF_stop(storeMaintainSwapSpace);
1256 }
1257
1258 /* release an object from a cache */
1259 void
1260 StoreEntry::release()
1261 {
1262 PROF_start(storeRelease);
1263 debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
1264 /* If, for any reason, we can't discard this object because of an
1265 * outstanding request, mark it for pending release */
1266
1267 if (locked()) {
1268 expireNow();
1269 debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
1270 releaseRequest();
1271 PROF_stop(storeRelease);
1272 return;
1273 }
1274
1275 if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
1276 setPrivateKey();
1277
1278 if (mem_obj)
1279 destroyMemObject();
1280
1281 if (swap_filen > -1) {
1282 /*
1283 * Fake a call to StoreEntry->lock(). When rebuilding is done,
1284 * we'll just call StoreEntry->unlock() on these.
1285 */
1286 ++lock_count;
1287 setReleaseFlag();
1288 LateReleaseStack.push_back(this);
1289 } else {
1290 destroyStoreEntry(static_cast<hash_link *>(this));
1291 // "this" is no longer valid
1292 }
1293
1294 PROF_stop(storeRelease);
1295 return;
1296 }
1297
1298 storeLog(STORE_LOG_RELEASE, this);
1299
1300 if (swap_filen > -1) {
1301 // log before unlink() below clears swap_filen
1302 if (!EBIT_TEST(flags, KEY_PRIVATE))
1303 storeDirSwapLog(this, SWAP_LOG_DEL);
1304
1305 unlink();
1306 }
1307
1308 setMemStatus(NOT_IN_MEMORY);
1309 destroyStoreEntry(static_cast<hash_link *>(this));
1310 PROF_stop(storeRelease);
1311 }
1312
1313 static void
1314 storeLateRelease(void *unused)
1315 {
1316 StoreEntry *e;
1317 int i;
1318 static int n = 0;
1319
1320 if (StoreController::store_dirs_rebuilding) {
1321 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1322 return;
1323 }
1324
1325 for (i = 0; i < 10; ++i) {
1326 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1327
1328 if (e == NULL) {
1329 /* done! */
1330 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1331 return;
1332 }
1333
1334 e->unlock();
1335 ++n;
1336 }
1337
1338 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1339 }
1340
1341 /* return 1 if a store entry is locked */
1342 int
1343 StoreEntry::locked() const
1344 {
1345 if (lock_count)
1346 return 1;
1347
1348 if (swap_status == SWAPOUT_WRITING)
1349 return 1;
1350
1351 if (store_status == STORE_PENDING)
1352 return 1;
1353
1354 /*
1355 * SPECIAL, PUBLIC entries should be "locked"
1356 */
1357 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1358 if (!EBIT_TEST(flags, KEY_PRIVATE))
1359 return 1;
1360
1361 return 0;
1362 }
1363
1364 bool
1365 StoreEntry::validLength() const
1366 {
1367 int64_t diff;
1368 const HttpReply *reply;
1369 assert(mem_obj != NULL);
1370 reply = getReply();
1371 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1372 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1373 objectLen());
1374 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1375 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1376
1377 if (reply->content_length < 0) {
1378 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1379 return 1;
1380 }
1381
1382 if (reply->hdr_sz == 0) {
1383 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1384 return 1;
1385 }
1386
1387 if (mem_obj->method == METHOD_HEAD) {
1388 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1389 return 1;
1390 }
1391
1392 if (reply->sline.status == HTTP_NOT_MODIFIED)
1393 return 1;
1394
1395 if (reply->sline.status == HTTP_NO_CONTENT)
1396 return 1;
1397
1398 diff = reply->hdr_sz + reply->content_length - objectLen();
1399
1400 if (diff == 0)
1401 return 1;
1402
1403 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1404
1405 return 0;
1406 }
1407
1408 static void
1409 storeRegisterWithCacheManager(void)
1410 {
1411 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1412 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1413 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1414 storeCheckCachableStats, 0, 1);
1415 }
1416
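/*
 * Registration sketch: further reports would follow the same pattern,
 * pairing an action name with an OBJH-style handler (hypothetical
 * example):
 *
 *   Mgr::RegisterAction("my_report", "My Report", myStatsHandler, 0, 1);
 */
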
1417 void
1418 storeInit(void)
1419 {
1420 storeKeyInit();
1421 mem_policy = createRemovalPolicy(Config.memPolicy);
1422 storeDigestInit();
1423 storeLogOpen();
1424 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1425 Store::Root().init();
1426 storeRebuildStart();
1427
1428 storeRegisterWithCacheManager();
1429 }
1430
1431 void
1432 storeConfigure(void)
1433 {
1434 store_swap_high = (long) (((float) Store::Root().maxSize() *
1435 (float) Config.Swap.highWaterMark) / (float) 100);
1436 store_swap_low = (long) (((float) Store::Root().maxSize() *
1437 (float) Config.Swap.lowWaterMark) / (float) 100);
1438 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1439 }
1440
1441 bool
1442 StoreEntry::memoryCachable() const
1443 {
1444 if (mem_obj == NULL)
1445 return 0;
1446
1447 if (mem_obj->data_hdr.size() == 0)
1448 return 0;
1449
1450 if (mem_obj->inmem_lo != 0)
1451 return 0;
1452
1453 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1454 return 0;
1455
1456 return 1;
1457 }
1458
1459 int
1460 StoreEntry::checkNegativeHit() const
1461 {
1462 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1463 return 0;
1464
1465 if (expires <= squid_curtime)
1466 return 0;
1467
1468 if (store_status != STORE_OK)
1469 return 0;
1470
1471 return 1;
1472 }
1473
1474 /**
1475 * Set object for negative caching.
1476 * Preserves any expiry information given by the server.
1477 * In the absence of proper expiry info it will be set to expire immediately,
1478 * or, with HTTP violations enabled, the configured negative TTL is observed.
1479 */
1480 void
1481 StoreEntry::negativeCache()
1482 {
1483 // XXX: should make the default for expires 0 instead of -1
1484 // so we can distinguish "Expires: -1" from nothing.
1485 if (expires <= 0)
1486 #if USE_HTTP_VIOLATIONS
1487 expires = squid_curtime + Config.negativeTtl;
1488 #else
1489 expires = squid_curtime;
1490 #endif
1491 EBIT_SET(flags, ENTRY_NEGCACHED);
1492 }
1493
1494 void
1495 storeFreeMemory(void)
1496 {
1497 Store::Root(NULL);
1498 #if USE_CACHE_DIGESTS
1499
1500 if (store_digest)
1501 cacheDigestDestroy(store_digest);
1502
1503 #endif
1504
1505 store_digest = NULL;
1506 }
1507
1508 int
1509 expiresMoreThan(time_t expires, time_t when)
1510 {
1511 if (expires < 0) /* No Expires given */
1512 return 1;
1513
1514 return (expires > (squid_curtime + when));
1515 }
1516
1517 int
1518 StoreEntry::validToSend() const
1519 {
1520 if (EBIT_TEST(flags, RELEASE_REQUEST))
1521 return 0;
1522
1523 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1524 if (expires <= squid_curtime)
1525 return 0;
1526
1527 if (EBIT_TEST(flags, ENTRY_ABORTED))
1528 return 0;
1529
1530 return 1;
1531 }
1532
1533 void
1534 StoreEntry::timestampsSet()
1535 {
1536 const HttpReply *reply = getReply();
1537 time_t served_date = reply->date;
1538 int age = reply->header.getInt(HDR_AGE);
1539 /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
1540 /* make sure that 0 <= served_date <= squid_curtime */
1541
1542 if (served_date < 0 || served_date > squid_curtime)
1543 served_date = squid_curtime;
1544
1545 /* Bug 1791:
1546 * If the returned Date: is more than 24 hours older than
1547 * the squid_curtime, then one of us needs to use NTP to set our
1548 * clock. We'll pretend that our clock is right.
1549 */
1550 else if (served_date < (squid_curtime - 24 * 60 * 60) )
1551 served_date = squid_curtime;
1552
1553 /*
1554 * Compensate with Age header if origin server clock is ahead
1555 * of us and there is a cache in between us and the origin
1556 * server. But DONT compensate if the age value is larger than
1557 * squid_curtime because it results in a negative served_date.
1558 */
1559 if (age > squid_curtime - served_date)
1560 if (squid_curtime > age)
1561 served_date = squid_curtime - age;
1562
1563 // compensate for Squid-to-server and server-to-Squid delays
1564 if (mem_obj && mem_obj->request) {
1565 const time_t request_sent =
1566 mem_obj->request->hier.peer_http_request_sent.tv_sec;
1567 if (0 < request_sent && request_sent < squid_curtime)
1568 served_date -= (squid_curtime - request_sent);
1569 }
1570
1571 if (reply->expires > 0 && reply->date > -1)
1572 expires = served_date + (reply->expires - reply->date);
1573 else
1574 expires = reply->expires;
1575
1576 lastmod = reply->last_modified;
1577
1578 timestamp = served_date;
1579 }
1580
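/*
 * Worked example for the Age compensation above: with squid_curtime ==
 * 1000, a reply Date of 990 and an Age header of 50, age (50) exceeds
 * squid_curtime - served_date (10), so served_date becomes 1000 - 50 =
 * 950; expires is then served_date plus the reply's (expires - date)
 * delta when both values are usable.
 */
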
1581 void
1582 StoreEntry::registerAbort(STABH * cb, void *data)
1583 {
1584 assert(mem_obj);
1585 assert(mem_obj->abort.callback == NULL);
1586 mem_obj->abort.callback = cb;
1587 mem_obj->abort.data = cbdataReference(data);
1588 }
1589
1590 void
1591 StoreEntry::unregisterAbort()
1592 {
1593 assert(mem_obj);
1594 if (mem_obj->abort.callback) {
1595 mem_obj->abort.callback = NULL;
1596 cbdataReferenceDone(mem_obj->abort.data);
1597 }
1598 }
1599
1600 void
1601 StoreEntry::dump(int l) const
1602 {
1603 debugs(20, l, "StoreEntry->key: " << getMD5Text());
1604 debugs(20, l, "StoreEntry->next: " << next);
1605 debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
1606 debugs(20, l, "StoreEntry->timestamp: " << timestamp);
1607 debugs(20, l, "StoreEntry->lastref: " << lastref);
1608 debugs(20, l, "StoreEntry->expires: " << expires);
1609 debugs(20, l, "StoreEntry->lastmod: " << lastmod);
1610 debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
1611 debugs(20, l, "StoreEntry->refcount: " << refcount);
1612 debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
1613 debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
1614 debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
1615 debugs(20, l, "StoreEntry->lock_count: " << lock_count);
1616 debugs(20, l, "StoreEntry->mem_status: " << mem_status);
1617 debugs(20, l, "StoreEntry->ping_status: " << ping_status);
1618 debugs(20, l, "StoreEntry->store_status: " << store_status);
1619 debugs(20, l, "StoreEntry->swap_status: " << swap_status);
1620 }
1621
1622 /*
1623 * NOTE, this function assumes only two mem states
1624 */
1625 void
1626 StoreEntry::setMemStatus(mem_status_t new_status)
1627 {
1628 if (new_status == mem_status)
1629 return;
1630
1631 // are we using a shared memory cache?
1632 if (Config.memShared && IamWorkerProcess()) {
1633 // enumerate calling cases if shared memory is enabled
1634 assert(new_status != IN_MEMORY || EBIT_TEST(flags, ENTRY_SPECIAL));
1635 // This method was designed to update replacement policy, not to
1636 // actually purge something from the memory cache (TODO: rename?).
1637 // Shared memory cache does not have a policy that needs updates.
1638 mem_status = new_status;
1639 return;
1640 }
1641
1642 assert(mem_obj != NULL);
1643
1644 if (new_status == IN_MEMORY) {
1645 assert(mem_obj->inmem_lo == 0);
1646
1647 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1648 debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
1649 } else {
1650 mem_policy->Add(mem_policy, this, &mem_obj->repl);
1651 debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
1652 }
1653
1654 ++hot_obj_count; // TODO: maintain for the shared hot cache as well
1655 } else {
1656 if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
1657 debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
1658 } else {
1659 mem_policy->Remove(mem_policy, this, &mem_obj->repl);
1660 debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
1661 }
1662
1663 --hot_obj_count;
1664 }
1665
1666 mem_status = new_status;
1667 }
1668
1669 const char *
1670 StoreEntry::url() const
1671 {
1672 if (this == NULL)
1673 return "[null_entry]";
1674 else if (mem_obj == NULL)
1675 return "[null_mem_obj]";
1676 else
1677 return mem_obj->url;
1678 }
1679
1680 void
1681 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1682 {
1683 if (mem_obj)
1684 return;
1685
1686 if (hidden_mem_obj) {
1687 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1688 mem_obj = hidden_mem_obj;
1689 hidden_mem_obj = NULL;
1690 mem_obj->resetUrls(aUrl, aLogUrl);
1691 return;
1692 }
1693
1694 mem_obj = new MemObject(aUrl, aLogUrl);
1695 }
1696
1697 /* this just sets DELAY_SENDING */
1698 void
1699 StoreEntry::buffer()
1700 {
1701 EBIT_SET(flags, DELAY_SENDING);
1702 }
1703
1704 /* this just clears DELAY_SENDING and Invokes the handlers */
1705 void
1706 StoreEntry::flush()
1707 {
1708 if (EBIT_TEST(flags, DELAY_SENDING)) {
1709 EBIT_CLR(flags, DELAY_SENDING);
1710 invokeHandlers();
1711 }
1712 }
1713
1714 int64_t
1715 StoreEntry::objectLen() const
1716 {
1717 assert(mem_obj != NULL);
1718 return mem_obj->object_sz;
1719 }
1720
1721 int64_t
1722 StoreEntry::contentLen() const
1723 {
1724 assert(mem_obj != NULL);
1725 assert(getReply() != NULL);
1726 return objectLen() - getReply()->hdr_sz;
1727 }
1728
1729 HttpReply const *
1730 StoreEntry::getReply () const
1731 {
1732 if (NULL == mem_obj)
1733 return NULL;
1734
1735 return mem_obj->getReply();
1736 }
1737
1738 void
1739 StoreEntry::reset()
1740 {
1741 assert (mem_obj);
1742 debugs(20, 3, "StoreEntry::reset: " << url());
1743 mem_obj->reset();
1744 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1745 rep->reset();
1746 expires = lastmod = timestamp = -1;
1747 }
1748
1749 /*
1750 * storeFsInit
1751 *
1752 * This routine calls the SETUP routine for each fs type.
1753 * I don't know where the best place for this is, and I'm not going to shuffle
1754 * around large chunks of code right now (that can be done once it's working).
1755 */
1756 void
1757 storeFsInit(void)
1758 {
1759 storeReplSetup();
1760 }
1761
1762 /*
1763 * called to add another store removal policy module
1764 */
1765 void
1766 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1767 {
1768 int i;
1769
1770 /* find the number of currently known repl types */
1771 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1772 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1773 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1774 return;
1775 }
1776 }
1777
1778 /* add the new type */
1779 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1780
1781 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1782
1783 storerepl_list[i].typestr = type;
1784
1785 storerepl_list[i].create = create;
1786 }
1787
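/*
 * Registration sketch: each removal policy module is expected to call
 * storeReplAdd() from its setup hook (run via storeReplSetup() in
 * storeFsInit() above). Hypothetical module:
 *
 *   storeReplAdd("mypolicy", createMyPolicy); // createMyPolicy is a REMOVALPOLICYCREATE
 */
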
1788 /*
1789 * Create a removal policy instance
1790 */
1791 RemovalPolicy *
1792 createRemovalPolicy(RemovalPolicySettings * settings)
1793 {
1794 storerepl_entry_t *r;
1795
1796 for (r = storerepl_list; r && r->typestr; ++r) {
1797 if (strcmp(r->typestr, settings->type) == 0)
1798 return r->create(settings->args);
1799 }
1800
1801 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1802 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1803 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1804 fatalf("ERROR: Unknown policy %s\n", settings->type);
1805 return NULL; /* NOTREACHED */
1806 }
1807
1808 #if 0
1809 void
1810 storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
1811 {
1812 if (e->swap_file_number == filn)
1813 return;
1814
1815 if (filn < 0) {
1816 assert(-1 == filn);
1817 storeDirMapBitReset(e->swap_file_number);
1818 storeDirLRUDelete(e);
1819 e->swap_file_number = -1;
1820 } else {
1821 assert(-1 == e->swap_file_number);
1822 storeDirMapBitSet(e->swap_file_number = filn);
1823 storeDirLRUAdd(e);
1824 }
1825 }
1826
1827 #endif
1828
1829 /*
1830 * Replace a store entry with
1831 * a new reply. This eats the reply.
1832 */
1833 void
1834 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1835 {
1836 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1837
1838 if (!mem_obj) {
1839 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1840 return;
1841 }
1842
1843 mem_obj->replaceHttpReply(rep);
1844
1845 if (andStartWriting)
1846 startWriting();
1847 }
1848
1849 void
1850 StoreEntry::startWriting()
1851 {
1852 Packer p;
1853
1854 /* TODO: when we store headers separately remove the header portion */
1855 /* TODO: mark the length of the headers ? */
1856 /* We ONLY want the headers */
1857 packerToStoreInit(&p, this);
1858
1859 assert (isEmpty());
1860 assert(mem_obj);
1861
1862 const HttpReply *rep = getReply();
1863 assert(rep);
1864
1865 rep->packHeadersInto(&p);
1866 mem_obj->markEndOfReplyHeaders();
1867
1868 rep->body.packInto(&p);
1869
1870 packerClean(&p);
1871 }
1872
1873 char const *
1874 StoreEntry::getSerialisedMetaData()
1875 {
1876 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1877 int swap_hdr_sz;
1878 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1879 storeSwapTLVFree(tlv_list);
1880 assert (swap_hdr_sz >= 0);
1881 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1882 return result;
1883 }
1884
1885 void
1886 StoreEntry::trimMemory(const bool preserveSwappable)
1887 {
1888 /*
1889 * DPW 2007-05-09
1890 * Bug #1943. We must not let go of any data for IN_MEMORY
1891 * objects. We have to wait until the mem_status changes.
1892 */
1893 if (mem_status == IN_MEMORY)
1894 return;
1895
1896 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1897 return; // cannot trim because we do not load them again
1898
1899 if (!preserveSwappable) {
1900 if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
1901 /* Nothing to do */
1902 return;
1903 }
1904 /*
1905 * It's not swap-able, and we're about to delete a chunk,
1906 * so we must make it PRIVATE. This is tricky/ugly because
1907 * for the most part, we treat swapable == cachable here.
1908 */
1909 releaseRequest();
1910 mem_obj->trimUnSwappable ();
1911 } else {
1912 mem_obj->trimSwappable ();
1913 }
1914 }
1915
1916 bool
1917 StoreEntry::modifiedSince(HttpRequest * request) const
1918 {
1919 int object_length;
1920 time_t mod_time = lastmod;
1921
1922 if (mod_time < 0)
1923 mod_time = timestamp;
1924
1925 debugs(88, 3, "modifiedSince: '" << url() << "'");
1926
1927 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1928
1929 if (mod_time < 0)
1930 return true;
1931
1932 /* Find size of the object */
1933 object_length = getReply()->content_length;
1934
1935 if (object_length < 0)
1936 object_length = contentLen();
1937
1938 if (mod_time > request->ims) {
1939 debugs(88, 3, "--> YES: entry newer than client");
1940 return true;
1941 } else if (mod_time < request->ims) {
1942 debugs(88, 3, "--> NO: entry older than client");
1943 return false;
1944 } else if (request->imslen < 0) {
1945 debugs(88, 3, "--> NO: same LMT, no client length");
1946 return false;
1947 } else if (request->imslen == object_length) {
1948 debugs(88, 3, "--> NO: same LMT, same length");
1949 return false;
1950 } else {
1951 debugs(88, 3, "--> YES: same LMT, different length");
1952 return true;
1953 }
1954 }
1955
1956 bool
1957 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1958 {
1959 const String reqETags = request.header.getList(HDR_IF_MATCH);
1960 return hasOneOfEtags(reqETags, false);
1961 }
1962
1963 bool
1964 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1965 {
1966 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1967 // weak comparison is allowed only for HEAD or full-body GET requests
1968 const bool allowWeakMatch = !request.flags.range &&
1969 (request.method == METHOD_GET || request.method == METHOD_HEAD);
1970 return hasOneOfEtags(reqETags, allowWeakMatch);
1971 }
1972
1973 /// whether at least one of the request ETags matches entity ETag
1974 bool
1975 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1976 {
1977 const ETag repETag = getReply()->header.getETag(HDR_ETAG);
1978 if (!repETag.str)
1979 return strListIsMember(&reqETags, "*", ',');
1980
1981 bool matched = false;
1982 const char *pos = NULL;
1983 const char *item;
1984 int ilen;
1985 while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1986 if (!strncmp(item, "*", ilen))
1987 matched = true;
1988 else {
1989 String str;
1990 str.append(item, ilen);
1991 ETag reqETag;
1992 if (etagParseInit(&reqETag, str.termedBuf())) {
1993 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1994 etagIsStrongEqual(repETag, reqETag);
1995 }
1996 }
1997 }
1998 return matched;
1999 }
2000
2001 SwapDir::Pointer
2002 StoreEntry::store() const
2003 {
2004 assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
2005 return INDEXSD(swap_dirn);
2006 }
2007
2008 void
2009 StoreEntry::unlink()
2010 {
2011 store()->unlink(*this); // implies disconnect()
2012 swap_filen = -1;
2013 swap_dirn = -1;
2014 swap_status = SWAPOUT_NONE;
2015 }
2016
2017 /*
2018 * return true if the entry is in a state where
2019 * it can accept more data (i.e. via the write() method)
2020 */
2021 bool
2022 StoreEntry::isAccepting() const
2023 {
2024 if (STORE_PENDING != store_status)
2025 return false;
2026
2027 if (EBIT_TEST(flags, ENTRY_ABORTED))
2028 return false;
2029
2030 return true;
2031 }
2032
2033 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2034 {
2035 return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
2036 e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
2037 e.swap_status;
2038 }
2039
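/*
 * Example of the format produced above: "5@1=1/2/0/2" reads as
 * swap_filen 5 in cache_dir 1, followed by the numeric mem, ping,
 * store and swap status values (1, 2, 0 and 2).
 */
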
2040 /* NullStoreEntry */
2041
2042 NullStoreEntry NullStoreEntry::_instance;
2043
2044 NullStoreEntry *
2045 NullStoreEntry::getInstance()
2046 {
2047 return &_instance;
2048 }
2049
2050 char const *
2051 NullStoreEntry::getMD5Text() const
2052 {
2053 return "N/A";
2054 }
2055
2056 void
2057 NullStoreEntry::operator delete(void*)
2058 {
2059 fatal ("Attempt to delete NullStoreEntry\n");
2060 }
2061
2062 char const *
2063 NullStoreEntry::getSerialisedMetaData()
2064 {
2065 return NULL;
2066 }
2067
2068 #if !_USE_INLINE_
2069 #include "Store.cci"
2070 #endif