]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Merge from trunk
[thirdparty/squid.git] / src / store.cc
1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "CacheDigest.h"
36 #include "CacheManager.h"
37 #include "comm/Connection.h"
38 #include "comm/Read.h"
39 #include "ETag.h"
40 #include "event.h"
41 #include "fde.h"
42 #include "globals.h"
43 #include "http.h"
44 #include "HttpReply.h"
45 #include "HttpRequest.h"
46 #include "mem_node.h"
47 #include "MemObject.h"
48 #include "mgr/Registration.h"
49 #include "mgr/StoreIoAction.h"
50 #include "profiler/Profiler.h"
51 #include "repl_modules.h"
52 #include "RequestFlags.h"
53 #include "SquidConfig.h"
54 #include "SquidTime.h"
55 #include "StatCounters.h"
56 #include "stmem.h"
57 #include "Store.h"
58 #include "store_digest.h"
59 #include "store_key_md5.h"
60 #include "store_key_md5.h"
61 #include "store_log.h"
62 #include "store_rebuild.h"
63 #include "StoreClient.h"
64 #include "StoreIOState.h"
65 #include "StoreMeta.h"
66 #include "StrList.h"
67 #include "swap_log_op.h"
68 #include "SwapDir.h"
69 #include "tools.h"
70 #if USE_DELAY_POOLS
71 #include "DelayPools.h"
72 #endif
73
74 #include <climits>
75 #include <stack>
76
77 #define REBUILD_TIMESTAMP_DELTA_MAX 2
78
79 #define STORE_IN_MEM_BUCKETS (229)
80
/** \todo Convert these string constants to enum string-arrays generated */

/// printable names for the memory-cache status values
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

/// printable names for the peer-ping status values
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

/// printable names for the store completion status values
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

/// printable names for the swap-out status values
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
104
/*
 * This defines a removal-policy (replacement) registry entry type.
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;         // policy type name
    REMOVALPOLICYCREATE *create; // factory function for this policy type
};

// registry of available removal policies; NULL until populated elsewhere
static storerepl_entry_t *storerepl_list = NULL;
/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
// entries queued for deferred release; presumably drained by
// storeLateRelease() (defined outside this chunk) — confirm
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

// singleton root of the store hierarchy, installed via Store::Root(aRoot)
StorePointer Store::CurrentRoot = NULL;
132
133 void
134 Store::Root(Store * aRoot)
135 {
136 CurrentRoot = aRoot;
137 }
138
139 void
140 Store::Root(StorePointer aRoot)
141 {
142 Root(aRoot.getRaw());
143 }
144
145 void
146 Store::Stats(StoreEntry * output)
147 {
148 assert (output);
149 Root().stat(*output);
150 }
151
152 void
153 Store::create()
154 {}
155
156 void
157 Store::diskFull()
158 {}
159
160 void
161 Store::sync()
162 {}
163
164 void
165 Store::unlink (StoreEntry &anEntry)
166 {
167 fatal("Store::unlink on invalid Store\n");
168 }
169
170 void *
171 StoreEntry::operator new (size_t bytecount)
172 {
173 assert (bytecount == sizeof (StoreEntry));
174
175 if (!pool) {
176 pool = memPoolCreate ("StoreEntry", bytecount);
177 pool->setChunkSize(2048 * 1024);
178 }
179
180 return pool->alloc();
181 }
182
183 void
184 StoreEntry::operator delete (void *address)
185 {
186 pool->freeOne(address);
187 }
188
189 void
190 StoreEntry::makePublic()
191 {
192 /* This object can be cached for a long time */
193
194 if (!EBIT_TEST(flags, RELEASE_REQUEST))
195 setPublicKey();
196 }
197
198 void
199 StoreEntry::makePrivate()
200 {
201 /* This object should never be cached at all */
202 expireNow();
203 releaseRequest(); /* delete object when not used */
204 }
205
206 void
207 StoreEntry::cacheNegatively()
208 {
209 /* This object may be negatively cached */
210 negativeCache();
211 makePublic();
212 }
213
214 size_t
215 StoreEntry::inUseCount()
216 {
217 if (!pool)
218 return 0;
219 return pool->getInUseCount();
220 }
221
222 const char *
223 StoreEntry::getMD5Text() const
224 {
225 return storeKeyText((const cache_key *)key);
226 }
227
228 #include "comm.h"
229
/// Comm callback shim: when a deferred read is resumed, re-enter the owning
/// StoreEntry's delay-aware read path with the saved read parameters.
void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}

/// Schedule a read of up to len bytes into buf, honoring the read-ahead gap
/// and (when built with delay pools) per-client bandwidth limits. When no
/// bytes may be read right now, the read is queued as a DeferredRead and
/// retried later via DeferReader().
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            // blocked by the read-ahead gap: park the read on the MemObject
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
280
/// How many bytes of the given range this entry will accept right now:
/// the full range end when there is no MemObject to enforce limits, 0 when
/// the read-ahead policy forbids reading, and otherwise whatever the
/// MemObject allows (which may consult delay pools unless told to ignore
/// them via ignoreDelayPools).
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

/// Whether reads for this entry should be deferred at the moment.
/// NOTE: the fd argument is not consulted; the decision depends only on
/// whether any bytes are currently wanted.
bool
StoreEntry::checkDeferRead(int fd) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

/// Propagate the no-delay (bypass delay pools) flag to our MemObject, if any.
void
StoreEntry::setNoDelay (bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}
311
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decide whether a newly attached store client should be treated as a
/// memory client or a disk client (see the XXX caveats above).
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // the beginning of the object is no longer in memory
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
377
/// Construct an empty, anonymous entry: no MemObject, no key, invalid
/// timestamps, not cached anywhere (NOT_IN_MEMORY / SWAPOUT_NONE),
/// STORE_PENDING status, and a zero lock count.
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
402
#if USE_ADAPTATION
/// Remember a producer call to be scheduled later by kickProducer().
/// Only the first deferred producer is kept; later requests are logged
/// and ignored until the stored call has been kicked.
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// Schedule the deferred producer call (if any) and forget it.
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif
423
/// Release this entry's in-memory object (if any), after detaching it from
/// the shared transients table and the memory cache index.
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        // clear the member before delete so this entry never points at
        // freed memory while the MemObject destructor runs
        mem_obj = NULL;
        delete mem;
    }
}
441
/// hash-table destructor callback: fully dispose of a StoreEntry being
/// removed from the store index. The NullStoreEntry singleton is never
/// destroyed.
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down) {
        // detach from the swap dir that holds our on-disk copy
        SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
        sd.disconnect(*e);
    }

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == NULL);

    delete e;
}
466
467 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
468
469 void
470 StoreEntry::hashInsert(const cache_key * someKey)
471 {
472 debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
473 key = storeKeyDup(someKey);
474 hash_join(store_table, this);
475 }
476
477 void
478 StoreEntry::hashDelete()
479 {
480 if (key) { // some test cases do not create keys and do not hashInsert()
481 hash_remove_link(store_table, this);
482 storeKeyFree((const cache_key *)key);
483 key = NULL;
484 }
485 }
486
487 /* -------------------------------------------------------------------------- */
488
/* get rid of memory copy of the object */
/// Free the in-memory copy of this object; entries that have not finished
/// swapping out to disk are released entirely.
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    if (swap_status != SWAPOUT_DONE)
        release();
}

/// Take a reference that keeps this entry alive; must be balanced by an
/// unlock() call. The context string identifies the locker in debug logs.
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

/// Record a fresh reference: update lastref and notify the replacement
/// policy through the store root.
void
StoreEntry::touch()
{
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
517
/// Set RELEASE_REQUEST (idempotent) and tell the store index to mark this
/// entry for unlinking.
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}

/// Request that this entry be released once no longer in use: flag it for
/// release and switch it to a private key so no new clients can find it.
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}
541
/// Drop one reference taken by lock(). When the last reference is gone the
/// entry is released (if flagged, or still STORE_PENDING) or handed to the
/// store's idle-entry handling, which may delete it. Returns the remaining
/// lock count (0 when this was the last reference).
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // incomplete entries cannot be revived later; mark for release
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
569
570 void
571 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
572 {
573 assert (aClient);
574 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
575
576 if (!result)
577 aClient->created (NullStoreEntry::getInstance());
578 else
579 aClient->created (result);
580 }
581
582 void
583 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
584 {
585 assert (aClient);
586 StoreEntry *result = storeGetPublicByRequest (request);
587
588 if (!result)
589 result = NullStoreEntry::getInstance();
590
591 aClient->created (result);
592 }
593
594 void
595 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
596 {
597 assert (aClient);
598 StoreEntry *result = storeGetPublic (uri, method);
599
600 if (!result)
601 result = NullStoreEntry::getInstance();
602
603 aClient->created (result);
604 }
605
606 StoreEntry *
607 storeGetPublic(const char *uri, const HttpRequestMethod& method)
608 {
609 return Store::Root().get(storeKeyPublic(uri, method));
610 }
611
612 StoreEntry *
613 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
614 {
615 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
616 }
617
618 StoreEntry *
619 storeGetPublicByRequest(HttpRequest * req)
620 {
621 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
622
623 if (e == NULL && req->method == Http::METHOD_HEAD)
624 /* We can generate a HEAD reply from a cached GET object */
625 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
626
627 return e;
628 }
629
/// Produce the next value of a monotonically increasing, strictly positive
/// counter used to make private cache keys unique.
/// The original pre-incremented and then tested for a negative result,
/// which relies on undefined signed-overflow behavior; this version wraps
/// back to 1 explicitly before the increment can overflow.
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (key_counter == INT_MAX)
        key_counter = 0; // wrap before incrementing: signed overflow is UB

    return ++key_counter;
}
640
/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
/// Re-key this entry with a unique private key so no new clients can find
/// it; an entry that already had a (public) key is also flagged for release
/// and logged out of the swap log.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris()) {
        // derive the private key from the entry's own URIs and method
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        // no URIs available; use a placeholder with a fresh counter value
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
679
/// Re-key this entry under its public cache key so future requests can hit
/// it. Handles Vary bookkeeping (creating or killing the "vary marker" base
/// object as needed), evicts any older entry already holding the public
/// key, and logs the addition to the swap log when on disk.
void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers); /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    // evict any previous occupant of the public key, then re-derive the
    // key (the eviction path may have changed relevant state)
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
798
799 StoreEntry *
800 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
801 {
802 StoreEntry *e = NULL;
803 debugs(20, 3, "storeCreateEntry: '" << url << "'");
804
805 e = new StoreEntry();
806 e->makeMemObject();
807 e->mem_obj->setUris(url, log_url, method);
808
809 if (flags.cachable) {
810 EBIT_CLR(e->flags, RELEASE_REQUEST);
811 } else {
812 e->releaseRequest();
813 }
814
815 e->store_status = STORE_PENDING;
816 e->refcount = 0;
817 e->lastref = squid_curtime;
818 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
819 e->ping_status = PING_NONE;
820 EBIT_SET(e->flags, ENTRY_VALIDATED);
821 return e;
822 }
823
/// Create and lock a new StoreEntry for the given URLs and method, giving
/// it an initial cache key: private when neighbors use private keys or the
/// request is not hierarchical, public otherwise.
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
    e->lock("storeCreateEntry");

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    return e;
}
837
/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

/// Store writeBuffer into this (still STORE_PENDING) entry's MemObject and
/// wake waiting clients unless DELAY_SENDING is set.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
866
/* Append incoming data from a primary server to an entry. */
/// Wraps the buffer in a StoreIOBuffer positioned at the current content
/// end and forwards to write() (which re-adds the reply header size).
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
885
/// printf-style formatted append to a store entry (cachemgr reports etc.).
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);

    storeAppendVPrintf(e, fmt, args);
    va_end(args);
}

/* used by storeAppendPrintf and Packer */
/// vprintf-style formatted append; output beyond the 4KB scratch buffer is
/// silently truncated by vsnprintf.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    buf[0] = '\0';
    vsnprintf(buf, 4096, fmt, vargs);
    e->append(buf, strlen(buf));
}
905
/// Counters recording why checkCachable() accepted or rejected entries;
/// dumped by the storeCheckCachableStats() cachemgr action.
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
    } no;   // rejection reasons

    struct {
        int Default;
    } yes;  // acceptances
} store_check_cachable_hist;
924
925 int
926 storeTooManyDiskFilesOpen(void)
927 {
928 if (Config.max_open_disk_fds == 0)
929 return 0;
930
931 if (store_open_disk_fd > Config.max_open_disk_fds)
932 return 1;
933
934 return 0;
935 }
936
937 int
938 StoreEntry::checkTooSmall()
939 {
940 if (EBIT_TEST(flags, ENTRY_SPECIAL))
941 return 0;
942
943 if (STORE_OK == store_status)
944 if (mem_obj->object_sz >= 0 &&
945 mem_obj->object_sz < Config.Store.minObjectSize)
946 return 1;
947 if (getReply()->content_length > -1)
948 if (getReply()->content_length < Config.Store.minObjectSize)
949 return 1;
950 return 0;
951 }
952
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decide whether this entry may be cached, counting the decision reason in
/// store_check_cachable_hist. Rejected entries (other than those already
/// flagged or negatively cached) are releaseRequest()ed on the way out.
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if ((getReply()->content_length > 0 &&
                    getReply()->content_length > store_maxobjsize) ||
                   mem_obj->endOffset() > store_maxobjsize) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}
1016
/// cachemgr action: dump the checkCachable() accept/reject counters.
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1047
/// Transition this entry from STORE_PENDING to STORE_OK once all data has
/// arrived: record the final object size, validate it against the reply
/// headers (flagging and releasing bad-length entries), and notify waiting
/// clients. Already-aborted entries are left alone.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!validLength()) {
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1090
/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the server-side callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    // aborted entries are complete as far as the store is concerned
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}
1143
/**
 * Clear Memory storage to accommodate the given object len
 */
/// Rate-limited (at most once per second) purge of in-memory objects,
/// driven by the memory replacement policy, until the needed pages fit
/// under store_pages_max.
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    // run at most once per second
    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    // round the byte count up to whole pages
    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count  << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released  << " were released");
    PROF_stop(storeGetMemSpace);
}
1191
/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *notused)
{
    // do one round of store maintenance on the root store
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    // self-rescheduling: fire again in one second
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}
1206
1207 /* The maximum objects to scan for maintain storage space */
1208 #define MAINTAIN_MAX_SCAN 1024
1209 #define MAINTAIN_MAX_REMOVE 64
1210
1211 /*
1212 * This routine is to be called by main loop in main.c.
1213 * It removes expired objects on only one bucket for each time called.
1214 *
1215 * This should get called 1/s from main().
1216 */
1217 void
1218 StoreController::maintain()
1219 {
1220 static time_t last_warn_time = 0;
1221
1222 PROF_start(storeMaintainSwapSpace);
1223 swapDir->maintain();
1224
1225 /* this should be emitted by the oversize dir, not globally */
1226
1227 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1228 if (squid_curtime - last_warn_time > 10) {
1229 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1230 << Store::Root().currentSize() / 1024.0 << " KB > "
1231 << (Store::Root().maxSize() >> 10) << " KB");
1232 last_warn_time = squid_curtime;
1233 }
1234 }
1235
1236 PROF_stop(storeMaintainSwapSpace);
1237 }
1238
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // drop any in-memory copy before touching the disk side
    Store::Root().memoryUnlink(*this);

    // while cache_dirs are being rebuilt we cannot unlink disk objects yet
    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        setPrivateKey();

        // NOTE(review): the outer condition already required swap_filen > -1;
        // this re-check only matters if setPrivateKey() can clear swap_filen
        // as a side effect -- confirm before simplifying.
        if (swap_filen > -1) {
            // lock the entry until rebuilding is done
            lock("storeLateRelease");
            setReleaseFlag();
            // queue for deferred release once the rebuild finishes
            LateReleaseStack.push(this);
        } else {
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1288
1289 static void
1290 storeLateRelease(void *unused)
1291 {
1292 StoreEntry *e;
1293 static int n = 0;
1294
1295 if (StoreController::store_dirs_rebuilding) {
1296 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1297 return;
1298 }
1299
1300 // TODO: this works but looks unelegant.
1301 for (int i = 0; i < 10; ++i) {
1302 if (LateReleaseStack.empty()) {
1303 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1304 return;
1305 } else {
1306 e = LateReleaseStack.top();
1307 LateReleaseStack.pop();
1308 }
1309
1310 e->unlock("storeLateRelease");
1311 ++n;
1312 }
1313
1314 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1315 }
1316
1317 /* return 1 if a store entry is locked */
1318 int
1319 StoreEntry::locked() const
1320 {
1321 if (lock_count)
1322 return 1;
1323
1324 /*
1325 * SPECIAL, PUBLIC entries should be "locked";
1326 * XXX: Their owner should lock them then instead of relying on this hack.
1327 */
1328 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1329 if (!EBIT_TEST(flags, KEY_PRIVATE))
1330 return 1;
1331
1332 return 0;
1333 }
1334
/// Whether the stored object size agrees with the reply's Content-Length.
/// Returns true when the lengths match or when no meaningful comparison is
/// possible (unknown length, HEAD, 304, 204, or missing headers).
bool
StoreEntry::validLength() const
{
    int64_t diff;
    const HttpReply *reply;
    assert(mem_obj != NULL);
    reply = getReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength:     object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength:         hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    // no Content-Length header: nothing to validate against
    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    // headers not parsed/stored yet: cannot compute body size
    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    // HEAD replies legitimately carry no body
    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    // 304 and 204 responses have no body by definition
    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    // positive diff: object is smaller than advertised; negative: larger
    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff)  << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );

    return 0;
}
1378
/// Registers the storage manager's cachemgr reporting actions
/// (storedir, store_io, store_check_cachable_stats).
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1387
/// One-time storage-manager startup: keys, removal policy, digest, logs,
/// the deferred-release event, store roots, and the index rebuild.
/// Call order matters: the root store must be initialized before rebuilding.
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // start the late-release loop; it idles until rebuilding completes
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1401
1402 /// computes maximum size of a cachable object
1403 /// larger objects are rejected by all (disk and memory) cache stores
1404 static int64_t
1405 storeCalcMaxObjSize()
1406 {
1407 int64_t ms = 0; // nothing can be cached without at least one store consent
1408
1409 // global maximum is at least the disk store maximum
1410 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1411 assert (Config.cacheSwap.swapDirs[i].getRaw());
1412 const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
1413 if (ms < storeMax)
1414 ms = storeMax;
1415 }
1416
1417 // global maximum is at least the memory store maximum
1418 // TODO: move this into a memory cache class when we have one
1419 const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
1420 if (ms < memMax)
1421 ms = memMax;
1422
1423 return ms;
1424 }
1425
/// Recomputes configuration-derived storage globals: the disk high/low
/// water marks (as absolute sizes), the memory-page budget, and the
/// global maximum cachable object size.
void
storeConfigure(void)
{
    // water marks are configured as percentages of the total swap size
    store_swap_high = (long) (((float) Store::Root().maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) Store::Root().maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    // how many mem_node pages fit into the configured memory cache
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    store_maxobjsize = storeCalcMaxObjSize();
}
1437
1438 bool
1439 StoreEntry::memoryCachable()
1440 {
1441 if (!checkCachable())
1442 return 0;
1443
1444 if (mem_obj == NULL)
1445 return 0;
1446
1447 if (mem_obj->data_hdr.size() == 0)
1448 return 0;
1449
1450 if (mem_obj->inmem_lo != 0)
1451 return 0;
1452
1453 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1454 return 0;
1455
1456 return 1;
1457 }
1458
1459 int
1460 StoreEntry::checkNegativeHit() const
1461 {
1462 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1463 return 0;
1464
1465 if (expires <= squid_curtime)
1466 return 0;
1467
1468 if (store_status != STORE_OK)
1469 return 0;
1470
1471 return 1;
1472 }
1473
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    //      so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    // flag the entry so hits are answered via checkNegativeHit()
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1493
/// Shutdown teardown: destroys the root store and, when digests are
/// compiled in, the store digest.
void
storeFreeMemory(void)
{
    // replace the root store pointer with NULL, destroying the old root
    Store::Root(NULL);
#if USE_CACHE_DIGESTS

    if (store_digest)
        cacheDigestDestroy(store_digest);

#endif

    store_digest = NULL;
}
1507
1508 int
1509 expiresMoreThan(time_t expires, time_t when)
1510 {
1511 if (expires < 0) /* No Expires given */
1512 return 1;
1513
1514 return (expires > (squid_curtime + when));
1515 }
1516
/// Whether this entry may be used to answer a client request:
/// not released/aborted, not a stale negative hit, and backed by
/// (or about to be backed by) some cache store.
int
StoreEntry::validToSend() const
{
    // already marked for removal: never serve it
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // stale negatively-cached entries are not servable
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
1556
/// Derives the entry's timestamp, expires, and lastmod fields from the
/// stored reply headers, correcting for clock skew and network delays
/// (mimics RFC 2616 section 13.2.3 age calculations).
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // express Expires relative to our (possibly corrected) served date;
    // without a usable Date header, take the server's Expires verbatim
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1604
/// Registers a server-side callback to run if this entry is aborted.
/// Only one abort callback may be registered at a time; the callback
/// data is protected via cbdata reference counting.
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL); // no double registration
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}
1613
/// Removes a previously registered abort callback (no-op if none is set)
/// and releases its cbdata reference.
void
StoreEntry::unregisterAbort()
{
    assert(mem_obj);
    if (mem_obj->abort.callback) {
        mem_obj->abort.callback = NULL;
        cbdataReferenceDone(mem_obj->abort.data);
    }
}
1623
/// Dumps all StoreEntry fields to the debug log at level `l`
/// (diagnostic aid; one line per field).
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1645
/*
 * NOTE, this function assumes only two mem states
 */
/// Transitions the entry between IN_MEMORY and NOT_IN_MEMORY, keeping the
/// local memory replacement policy and the hot-object counter in sync.
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        // a complete (untrimmed) copy is required to serve from memory
        assert(mem_obj->inmem_lo == 0);

        // SPECIAL entries are never purged, so they stay out of the policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1690
/// Returns the entry's store ID / URL for logging, or a placeholder when
/// it is not available.
/// NOTE(review): `this == NULL` is undefined behavior -- calling a member
/// function through a null pointer is already UB, and modern compilers may
/// optimize the check away. Removing it requires auditing callers that may
/// invoke url() on a null entry; flagged rather than changed here.
const char *
StoreEntry::url() const
{
    if (this == NULL)
        return "[null_entry]";
    else if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}
1701
1702 MemObject *
1703 StoreEntry::makeMemObject()
1704 {
1705 if (!mem_obj)
1706 mem_obj = new MemObject();
1707 return mem_obj;
1708 }
1709
1710 void
1711 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
1712 {
1713 makeMemObject();
1714 mem_obj->setUris(aUrl, aLogUrl, aMethod);
1715 }
1716
/* this just sets DELAY_SENDING */
/// Pause delivery to clients until flush() is called.
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1723
1724 /* this just clears DELAY_SENDING and Invokes the handlers */
1725 void
1726 StoreEntry::flush()
1727 {
1728 if (EBIT_TEST(flags, DELAY_SENDING)) {
1729 EBIT_CLR(flags, DELAY_SENDING);
1730 invokeHandlers();
1731 }
1732 }
1733
/// Total stored object size in bytes (headers + body); requires a MemObject.
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1740
/// Body size in bytes: total object length minus the stored header size.
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1748
1749 HttpReply const *
1750 StoreEntry::getReply () const
1751 {
1752 if (NULL == mem_obj)
1753 return NULL;
1754
1755 return mem_obj->getReply();
1756 }
1757
1758 void
1759 StoreEntry::reset()
1760 {
1761 assert (mem_obj);
1762 debugs(20, 3, "StoreEntry::reset: " << url());
1763 mem_obj->reset();
1764 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1765 rep->reset();
1766 expires = lastmod = timestamp = -1;
1767 }
1768
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    // registers the available replacement-policy modules
    storeReplSetup();
}
1781
1782 /*
1783 * called to add another store removal policy module
1784 */
1785 void
1786 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1787 {
1788 int i;
1789
1790 /* find the number of currently known repl types */
1791 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1792 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1793 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1794 return;
1795 }
1796 }
1797
1798 /* add the new type */
1799 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1800
1801 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1802
1803 storerepl_list[i].typestr = type;
1804
1805 storerepl_list[i].create = create;
1806 }
1807
1808 /*
1809 * Create a removal policy instance
1810 */
1811 RemovalPolicy *
1812 createRemovalPolicy(RemovalPolicySettings * settings)
1813 {
1814 storerepl_entry_t *r;
1815
1816 for (r = storerepl_list; r && r->typestr; ++r) {
1817 if (strcmp(r->typestr, settings->type) == 0)
1818 return r->create(settings->args);
1819 }
1820
1821 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1822 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1823 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1824 fatalf("ERROR: Unknown policy %s\n", settings->type);
1825 return NULL; /* NOTREACHED */
1826 }
1827
#if 0
/// Dead code (compiled out): historical helper that kept the swap-dir
/// bitmap and LRU list in sync when an entry's swap file number changed.
/// Kept for reference only.
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1848
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    // cannot attach a reply without a MemObject to hold it
    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep); // MemObject takes ownership of rep

    // optionally serialize the new headers into the store right away
    if (andStartWriting)
        startWriting();
}
1868
/// Serializes the stored reply's headers (and any reply body object) into
/// this (still empty) entry via a Packer, and marks the header boundary.
void
StoreEntry::startWriting()
{
    Packer p;

    /* TODO: when we store headers serparately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    packerToStoreInit(&p, this);

    // headers must be the first bytes written to the entry
    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    rep->packHeadersInto(&p);
    // record where headers end so hdr_sz can be computed
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(&p);

    packerClean(&p);
}
1893
/// Builds and packs this entry's swap metadata TLV list into a malloc'ed
/// buffer. Records the packed size in mem_obj->swap_hdr_sz.
/// The caller owns (and must free) the returned buffer.
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    // the TLV list is only needed for packing; free it immediately
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1905
/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
            mem_obj->xitTable.index >= 0 && // other workers may be interested
            mem_obj->memCache.index < 0 && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}
1924
/// Called when the memory-caching decision has been made.
/// The willCacheInRam value itself is currently unused here: the check
/// below re-reads the decision state from mem_obj.
void
StoreEntry::memOutDecision(const bool willCacheInRam)
{
    transientsAbandonmentCheck();
}
1930
/// Records the disk swap-out decision on the MemObject and re-evaluates
/// whether the transient entry should be abandoned.
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}
1939
/// Frees in-memory object data that is no longer needed, either keeping
/// enough to finish a pending swap-out (preserveSwappable) or trimming
/// everything trimmable.
/// NOTE(review): mem_obj is dereferenced without a null check below --
/// presumably all callers guarantee a MemObject exists; confirm.
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
1961
/// Answers an If-Modified-Since check against this entry (HTTP/1.0-style
/// IMS with optional length). Returns true when the entry should be
/// considered modified relative to the client's validator.
bool
StoreEntry::modifiedSince(HttpRequest * request) const
{
    int object_length;
    // prefer Last-Modified; fall back to our own timestamp
    time_t mod_time = lastmod;

    if (mod_time < 0)
        mod_time = timestamp;

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    // no usable validator at all: treat as modified
    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > request->ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < request->ims) {
        debugs(88, 3, "-->  NO: entry older than client");
        return false;
    } else if (request->imslen < 0) {
        debugs(88, 3, "-->  NO: same LMT, no client length");
        return false;
    } else if (request->imslen == object_length) {
        debugs(88, 3, "-->  NO: same LMT, same length");
        return false;
    } else {
        // same modification time but different length: content changed
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}
2001
2002 bool
2003 StoreEntry::hasEtag(ETag &etag) const
2004 {
2005 if (const HttpReply *reply = getReply()) {
2006 etag = reply->header.getETag(HDR_ETAG);
2007 if (etag.str)
2008 return true;
2009 }
2010 return false;
2011 }
2012
2013 bool
2014 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
2015 {
2016 const String reqETags = request.header.getList(HDR_IF_MATCH);
2017 return hasOneOfEtags(reqETags, false);
2018 }
2019
2020 bool
2021 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
2022 {
2023 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
2024 // weak comparison is allowed only for HEAD or full-body GET requests
2025 const bool allowWeakMatch = !request.flags.isRanged &&
2026 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
2027 return hasOneOfEtags(reqETags, allowWeakMatch);
2028 }
2029
/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(HDR_ETAG);
    // entry has no ETag: only the wildcard validator "*" can match
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // walk the comma-separated request list until a match is found
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            // copy the list item so it can be null-terminated for parsing
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
2057
/// The SwapDir this entry is stored in; the entry must have a valid
/// (configured) swap_dirn.
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
2064
/// Removes this entry's on-disk object and clears its swap-state fields
/// so the entry no longer appears disk-backed.
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
2073
2074 /*
2075 * return true if the entry is in a state where
2076 * it can accept more data (ie with write() method)
2077 */
2078 bool
2079 StoreEntry::isAccepting() const
2080 {
2081 if (STORE_PENDING != store_status)
2082 return false;
2083
2084 if (EBIT_TEST(flags, ENTRY_ABORTED))
2085 return false;
2086
2087 return true;
2088 }
2089
/// Compact one-line debugging rendering of a StoreEntry: cache indexes,
/// status letters, flag letters, address, and lock count.
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    // cache backing indexes: t=transients, m=shared memory cache, d=disk
    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    // O marks an entry collapsed across SMP workers
    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}
2137
/* NullStoreEntry */

// the single shared null-object instance
NullStoreEntry NullStoreEntry::_instance;

/// Accessor for the shared NullStoreEntry singleton.
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2147
/// Null-object stub: there is no real key, so report a placeholder.
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2153
/// The singleton must never be deleted; deleting it is a fatal bug.
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2159
/// Null-object stub: a null entry has no swap metadata to serialize.
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}