/* src/store.cc -- Squid Storage Manager implementation */
1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
#include "squid.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "comm/Connection.h"
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "profiler/Profiler.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidTime.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StoreMeta.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "SwapDir.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

#include <climits>
#include <stack>
75
76 #define REBUILD_TIMESTAMP_DELTA_MAX 2
77
78 #define STORE_IN_MEM_BUCKETS (229)
79
80 /** \todo Convert these string constants to enum string-arrays generated */
81
// human-readable names for mem_status_t values (indexed by enum value)
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

// human-readable names for ping_status_t values
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

// human-readable names for store_status_t values
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

// human-readable names for swap_status_t values
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
103
/*
 * This defines a repl (removal/replacement policy) type
 */

typedef struct _storerepl_entry storerepl_entry_t;

// one registered removal policy implementation
struct _storerepl_entry {
    const char *typestr;            // policy type name
    REMOVALPOLICYCREATE *create;    // factory creating policy instances
};

// table of registered removal policies (NULL until registration)
static storerepl_entry_t *storerepl_list = NULL;
116
/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;    // cachemgr report handler
static EVH storeLateRelease;            // event handler; drains LateReleaseStack

/*
 * local variables
 */
// entries whose release was requested while still locked; freed later
static std::stack<StoreEntry*> LateReleaseStack;
// memory pool backing StoreEntry::operator new/delete
MemAllocator *StoreEntry::pool = NULL;

// the process-wide root of the store hierarchy
StorePointer Store::CurrentRoot = NULL;
131
132 void
133 Store::Root(Store * aRoot)
134 {
135 CurrentRoot = aRoot;
136 }
137
138 void
139 Store::Root(StorePointer aRoot)
140 {
141 Root(aRoot.getRaw());
142 }
143
144 void
145 Store::Stats(StoreEntry * output)
146 {
147 assert (output);
148 Root().stat(*output);
149 }
150
/// default no-op: concrete stores build their on-disk structures here
void
Store::create()
{}

/// default no-op: concrete stores react to a disk-full condition here
void
Store::diskFull()
{}

/// default no-op: concrete stores flush pending state here
void
Store::sync()
{}

/// base Store cannot unlink entries; reaching this is a fatal logic error
void
Store::unlink (StoreEntry &anEntry)
{
    fatal("Store::unlink on invalid Store\n");
}
168
/// Pool-backed allocation for StoreEntry objects. The pool is created
/// lazily on the first allocation and is never destroyed.
void *
StoreEntry::operator new (size_t bytecount)
{
    // this allocator only serves plain StoreEntry objects
    assert (bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
        pool->setChunkSize(2048 * 1024);    // allocate in 2MB chunks
    }

    return pool->alloc();
}

/// Return a StoreEntry's storage to the pool (pool must already exist).
void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
187
188 void
189 StoreEntry::makePublic()
190 {
191 /* This object can be cached for a long time */
192
193 if (!EBIT_TEST(flags, RELEASE_REQUEST))
194 setPublicKey();
195 }
196
197 void
198 StoreEntry::makePrivate()
199 {
200 /* This object should never be cached at all */
201 expireNow();
202 releaseRequest(); /* delete object when not used */
203 }
204
205 void
206 StoreEntry::cacheNegatively()
207 {
208 /* This object may be negatively cached */
209 negativeCache();
210 makePublic();
211 }
212
213 size_t
214 StoreEntry::inUseCount()
215 {
216 if (!pool)
217 return 0;
218 return pool->getInUseCount();
219 }
220
221 const char *
222 StoreEntry::getMD5Text() const
223 {
224 return storeKeyText((const cache_key *)key);
225 }
226
227 #include "comm.h"
228
229 void
230 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
231 {
232 StoreEntry *anEntry = (StoreEntry *)theContext;
233 anEntry->delayAwareRead(aRead.conn,
234 aRead.buf,
235 aRead.len,
236 aRead.callback);
237 }
238
/// Schedule a read of up to len bytes from conn into buf, honoring the
/// read-ahead gap and delay-pool limits. If no bytes may be accepted now,
/// the read is deferred and retried later via DeferReader.
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    // zero means some limit forbids reading right now; defer instead
    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            // read-ahead gap is full: wait for a reader to consume data
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        // bandwidth-limited: defer via the most permissive delay id
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delyaAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
279
/// Upper bound on the number of bytes this entry can accept now, capped at
/// aRange.end. Returns zero when the read-ahead gap is full; delay-pool
/// limits are applied unless ignoreDelayPools is set.
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    // without a memory object there is no accounting; accept the maximum
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    // the gap between readers and appended data is already at its limit
    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
297
298 bool
299 StoreEntry::checkDeferRead(int fd) const
300 {
301 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
302 }
303
304 void
305 StoreEntry::setNoDelay (bool const newValue)
306 {
307 if (mem_obj)
308 mem_obj->setNoDelay(newValue);
309 }
310
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decide whether a new client of this entry should be served from memory
/// or via the swapin (disk) path. The order of checks matters.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // the beginning of the object is no longer in memory: must swap in
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
376
/// Construct an empty, unlocked, memory-only, pending entry with no key,
/// no MemObject, and no assigned swap file.
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),              // set later by timestampsSet()
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),             // -1: no swap slot assigned
    swap_dirn(-1),              // -1: no cache_dir assigned
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
401
#if USE_ADAPTATION
/// Remember a producer call to schedule later (via kickProducer).
/// Only the first deferred producer is kept; duplicates are logged.
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is allready set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// Schedule the previously deferred producer call, if any, and forget it.
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif
422
/// Disconnect this entry from the shared transients/memory caches and
/// destroy its MemObject. Safe to call when there is no MemObject.
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        // NOTE(review): mem_obj is cleared before the delete — presumably to
        // protect code reached during destruction; preserve this ordering
        mem_obj = NULL;
        delete mem;
    }
}
440
/// hash-table free() callback: fully tear down a StoreEntry — disconnect
/// it from its SwapDir, destroy its MemObject, remove it from the hash,
/// and delete it. The NullStoreEntry singleton is never destroyed.
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    // data is the hash_link base of a StoreEntry
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
        sd.disconnect(*e);
    }

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have cleared the key
    assert(e->key == NULL);

    delete e;
}
465
466 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
467
/// Install a copy of someKey as this entry's key and add the entry to the
/// global store hash table.
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

/// Remove this entry from the global store hash table and free its key.
/// A no-op when there is no key.
void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}
485
486 /* -------------------------------------------------------------------------- */
487
/* get rid of memory copy of the object */
/// Drop the in-memory copy of this object. If the object was not fully
/// swapped out, the whole entry must be released (it has no complete copy).
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    // without a finished disk copy, the entry cannot survive losing memory
    if (swap_status != SWAPOUT_DONE)
        release();
}
502
/// Prevent the entry from being destroyed while in use; context names the
/// locker for debugging. Paired with unlock().
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

/// Record a reference: refresh lastref and notify the replacement policy.
void
StoreEntry::touch()
{
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
516
/// Mark this entry for eventual removal and tell all stores/workers.
/// Idempotent: a second call is a no-op.
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}

/// Request removal of this entry: mark it for unlink and switch it to a
/// private key so no future request can hit it. Idempotent.
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}
540
/// Drop one reference. When the last reference goes away, either release
/// the entry (if requested or still pending) or hand it to the idle-entry
/// machinery, which may delete it. Returns the remaining lock count.
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // a still-pending entry losing its last user can never complete; drop it
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    // idle entries are expected to have public keys at this point
    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
568
569 void
570 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
571 {
572 assert (aClient);
573 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
574
575 if (!result)
576 aClient->created (NullStoreEntry::getInstance());
577 else
578 aClient->created (result);
579 }
580
/// Look up the public entry for request (with HEAD-to-GET fallback, see
/// storeGetPublicByRequest) and notify aClient; misses deliver the
/// NullStoreEntry singleton.
void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

/// Look up the public entry for (uri, method) and notify aClient; misses
/// deliver the NullStoreEntry singleton.
void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
604
605 StoreEntry *
606 storeGetPublic(const char *uri, const HttpRequestMethod& method)
607 {
608 return Store::Root().get(storeKeyPublic(uri, method));
609 }
610
611 StoreEntry *
612 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
613 {
614 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
615 }
616
617 StoreEntry *
618 storeGetPublicByRequest(HttpRequest * req)
619 {
620 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
621
622 if (e == NULL && req->method == Http::METHOD_HEAD)
623 /* We can generate a HEAD reply from a cached GET object */
624 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
625
626 return e;
627 }
628
/// monotonically increasing id for private cache keys; wraps around to 1
/// (never returns a non-positive value)
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;
    if (key_counter < 0)    // signed wrap-around: restart at 1
        key_counter = 1;

    return key_counter;
}
639
640 /* RBC 20050104 AFAICT this should become simpler:
641 * rather than reinserting with a special key it should be marked
642 * as 'released' and then cleaned up when refcounting indicates.
643 * the StoreHashIndex could well implement its 'released' in the
644 * current manner.
645 * Also, clean log writing should skip over ia,t
646 * Otherwise, we need a 'remove from the index but not the store
647 * concept'.
648 */
/// Give this entry a unique private key, removing it from the public index
/// (and logging a swap-log DEL when it has a disk copy) so no request can
/// hit it anymore. Idempotent for already-private keyed entries.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris()) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        // no URIs available to derive a key from; use a placeholder
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    // private keys are unique by construction
    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
678
/// Give this entry a public key so future requests can hit it. Handles
/// Vary bookkeeping (creating/refreshing the "vary marker" base object)
/// and evicts any existing entry that holds the target public key.
/// Must not be called on RELEASE_REQUEST'ed entries.
void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers);       /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    // evict any previous owner of the target public key
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        // recompute: the eviction above may have invalidated newkey's buffer
        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    // a disk-resident entry becomes findable again: log a swap-log ADD
    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
797
798 StoreEntry *
799 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
800 {
801 StoreEntry *e = NULL;
802 debugs(20, 3, "storeCreateEntry: '" << url << "'");
803
804 e = new StoreEntry();
805 e->makeMemObject();
806 e->mem_obj->setUris(url, log_url, method);
807
808 if (flags.cachable) {
809 EBIT_CLR(e->flags, RELEASE_REQUEST);
810 } else {
811 e->releaseRequest();
812 }
813
814 e->store_status = STORE_PENDING;
815 e->refcount = 0;
816 e->lastref = squid_curtime;
817 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
818 e->ping_status = PING_NONE;
819 EBIT_SET(e->flags, ENTRY_VALIDATED);
820 return e;
821 }
822
823 StoreEntry *
824 storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
825 {
826 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
827 e->lock("storeCreateEntry");
828
829 if (neighbors_do_private_keys || !flags.hierarchical)
830 e->setPrivateKey();
831 else
832 e->setPublicKey();
833
834 return e;
835 }
836
/* Mark object as expired */
/// Force immediate expiration by setting the expiry time to "now".
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
844
/// Append writeBuffer's content to this (pending) entry's memory object,
/// shifting the caller's content offset past the stored headers, and wake
/// any waiting clients unless DELAY_SENDING is set.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);   // make room before buffering
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
865
/* Append incoming data from a primary server to an entry. */
/// Append len bytes of buf at the entry's current end offset (expressed as
/// a content offset, which write() converts back).
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
884
/// printf-style convenience wrapper over storeAppendVPrintf()
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);

    storeAppendVPrintf(e, fmt, args);
    va_end(args);
}

/* used by storeAppendPrintf and Packer */
/// Format into a fixed 4KB buffer and append the result to e.
/// NOTE: output longer than 4095 characters is silently truncated.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    buf[0] = '\0';
    vsnprintf(buf, 4096, fmt, vargs);
    e->append(buf, strlen(buf));
}
904
/// counters for StoreEntry::checkCachable() outcomes, reported by
/// storeCheckCachableStats()
struct _store_check_cachable_hist {

    // reasons an entry was judged not cachable
    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
    } no;

    // entries judged cachable
    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
923
924 int
925 storeTooManyDiskFilesOpen(void)
926 {
927 if (Config.max_open_disk_fds == 0)
928 return 0;
929
930 if (store_open_disk_fd > Config.max_open_disk_fds)
931 return 1;
932
933 return 0;
934 }
935
/// whether this entry is below the configured minimum object size
/// (ENTRY_SPECIAL entries are exempt). For completed entries the actual
/// object size is checked; otherwise only the advertised content length.
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz < 0 ||
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    // content_length of -1 means unknown: cannot be judged too small here
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
951
// TODO: remove checks already performed by swapoutPossible()
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decide whether this entry may be cached, updating the
/// store_check_cachable_hist counters. A negative verdict (except the
/// negative-cached case) also requests release of the entry.
int
StoreEntry::checkCachable()
{
#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, RELEASE_REQUEST)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
            ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0;           /* avoid release call below */
        } else if ((getReply()->content_length > 0 &&
                    getReply()->content_length > store_maxobjsize) ||
                   mem_obj->endOffset() > store_maxobjsize) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    // not cachable: schedule this entry's removal
    releaseRequest();
    return 0;
}
1006
/// cachemgr report handler: dump the checkCachable() outcome histogram
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1037
/// Mark the end of the entry's incoming data: record the final object
/// size, switch to STORE_OK, validate the length, and wake clients.
/// A no-op (beyond sanity checks) for already-aborted entries.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    // length mismatch with advertised Content-Length: never serve this copy
    if (!validLength()) {
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1080
/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the server-side callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort");  /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    // aborted entries are "complete" as far as store status is concerned
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        // run the server-side abort callback from the event loop, not here
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort");        /* unlock */
}
1133
/**
 * Clear Memory storage to accommodate the given object len
 */
/// Purge in-memory objects (via the memory replacement policy) until the
/// requested number of pages fits under store_pages_max. Rate-limited to
/// at most one purge walk per second.
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    // already ran this second; skip to bound the cost of frequent calls
    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    // round the byte count up to whole store pages
    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released << " were released");
    PROF_stop(storeGetMemSpace);
}
1181
1182 /* thunk through to Store::Root().maintain(). Note that this would be better still
1183 * if registered against the root store itself, but that requires more complex
1184 * update logic - bigger fish to fry first. Long term each store when
1185 * it becomes active will self register
1186 */
1187 void
1188 Store::Maintain(void *notused)
1189 {
1190 Store::Root().maintain();
1191
1192 /* Reregister a maintain event .. */
1193 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1194
1195 }
1196
1197 /* The maximum objects to scan for maintain storage space */
1198 #define MAINTAIN_MAX_SCAN 1024
1199 #define MAINTAIN_MAX_REMOVE 64
1200
1201 /*
1202 * This routine is to be called by main loop in main.c.
1203 * It removes expired objects on only one bucket for each time called.
1204 *
1205 * This should get called 1/s from main().
1206 */
1207 void
1208 StoreController::maintain()
1209 {
1210 static time_t last_warn_time = 0;
1211
1212 PROF_start(storeMaintainSwapSpace);
1213 swapDir->maintain();
1214
1215 /* this should be emitted by the oversize dir, not globally */
1216
1217 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1218 if (squid_curtime - last_warn_time > 10) {
1219 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1220 << Store::Root().currentSize() / 1024.0 << " KB > "
1221 << (Store::Root().maxSize() >> 10) << " KB");
1222 last_warn_time = squid_curtime;
1223 }
1224 }
1225
1226 PROF_stop(storeMaintainSwapSpace);
1227 }
1228
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // drop any in-memory copy first
    Store::Root().memoryUnlink(*this);

    // While cache_dirs are still rebuilding, a disk-backed entry cannot be
    // destroyed outright (the rebuild may still index it); queue it for
    // storeLateRelease() instead.
    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        setPrivateKey();

        if (swap_filen > -1) {
            // lock the entry until rebuilding is done
            lock("storeLateRelease");
            setReleaseFlag();
            LateReleaseStack.push(this);
        } else {
            // NOTE(review): given the outer swap_filen > -1 check, this
            // branch looks unreachable unless setPrivateKey() clears
            // swap_filen -- confirm before simplifying.
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1278
1279 static void
1280 storeLateRelease(void *unused)
1281 {
1282 StoreEntry *e;
1283 static int n = 0;
1284
1285 if (StoreController::store_dirs_rebuilding) {
1286 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1287 return;
1288 }
1289
1290 // TODO: this works but looks unelegant.
1291 for (int i = 0; i < 10; ++i) {
1292 if (LateReleaseStack.empty()) {
1293 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1294 return;
1295 } else {
1296 e = LateReleaseStack.top();
1297 LateReleaseStack.pop();
1298 }
1299
1300 e->unlock("storeLateRelease");
1301 ++n;
1302 }
1303
1304 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1305 }
1306
1307 /* return 1 if a store entry is locked */
1308 int
1309 StoreEntry::locked() const
1310 {
1311 if (lock_count)
1312 return 1;
1313
1314 /*
1315 * SPECIAL, PUBLIC entries should be "locked";
1316 * XXX: Their owner should lock them then instead of relying on this hack.
1317 */
1318 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1319 if (!EBIT_TEST(flags, KEY_PRIVATE))
1320 return 1;
1321
1322 return 0;
1323 }
1324
1325 bool
1326 StoreEntry::validLength() const
1327 {
1328 int64_t diff;
1329 const HttpReply *reply;
1330 assert(mem_obj != NULL);
1331 reply = getReply();
1332 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1333 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1334 objectLen());
1335 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1336 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1337
1338 if (reply->content_length < 0) {
1339 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1340 return 1;
1341 }
1342
1343 if (reply->hdr_sz == 0) {
1344 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1345 return 1;
1346 }
1347
1348 if (mem_obj->method == Http::METHOD_HEAD) {
1349 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1350 return 1;
1351 }
1352
1353 if (reply->sline.status() == Http::scNotModified)
1354 return 1;
1355
1356 if (reply->sline.status() == Http::scNoContent)
1357 return 1;
1358
1359 diff = reply->hdr_sz + reply->content_length - objectLen();
1360
1361 if (diff == 0)
1362 return 1;
1363
1364 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1365
1366 return 0;
1367 }
1368
/// Registers the store subsystem's cache manager actions
/// (storedir, store_io, store_check_cachable_stats reports).
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1377
/// One-time storage subsystem startup: key hashing, the memory removal
/// policy, cache digests, the store log, the late-release loop, the root
/// store, and the cache_dir rebuild. Order matters here.
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // start the slow late-release loop (see storeLateRelease)
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1391
/// Recomputes configuration-derived store limits: the disk-store high/low
/// water marks (as a percentage of the maximum size) and the memory-cache
/// page limit.
void
storeConfigure(void)
{
    store_swap_high = (long) (((float) Store::Root().maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) Store::Root().maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);
}
1401
1402 bool
1403 StoreEntry::memoryCachable() const
1404 {
1405 if (mem_obj == NULL)
1406 return 0;
1407
1408 if (mem_obj->data_hdr.size() == 0)
1409 return 0;
1410
1411 if (mem_obj->inmem_lo != 0)
1412 return 0;
1413
1414 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1415 return 0;
1416
1417 return 1;
1418 }
1419
1420 int
1421 StoreEntry::checkNegativeHit() const
1422 {
1423 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1424 return 0;
1425
1426 if (expires <= squid_curtime)
1427 return 0;
1428
1429 if (store_status != STORE_OK)
1430 return 0;
1431
1432 return 1;
1433 }
1434
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    // NOTE: the #if/#else below selects the single statement forming the
    // body of this 'if'; keep it a single statement in both branches.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1454
/// Shutdown teardown of global store state: releases the root store and
/// destroys the cache digest (when digests are compiled in).
void
storeFreeMemory(void)
{
    Store::Root(NULL);
#if USE_CACHE_DIGESTS

    if (store_digest)
        cacheDigestDestroy(store_digest);

#endif

    store_digest = NULL;
}
1468
1469 int
1470 expiresMoreThan(time_t expires, time_t when)
1471 {
1472 if (expires < 0) /* No Expires given */
1473 return 1;
1474
1475 return (expires > (squid_curtime + when));
1476 }
1477
/// Returns 1 when this entry may be used to answer a client: it must not
/// be marked for release, negatively cached past its expiry, or aborted,
/// and it must still have (or be acquiring) some cache backing.
int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // a negatively cached entry is only valid until it expires
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing so we should not rely on the store cache at all. This
    // is wrong for range requests that could feed off nibbled memory (XXX).
    if (mem_obj->inmem_lo) // in local memory cache, but got nibbled at
        return 0;

    return 1;
}
1512
/// Derives this entry's timestamp, expires, and lastmod fields from its
/// HTTP reply, mimicking the RFC 2616 section 13.2.3 age calculation and
/// clamping obviously bogus server clocks.
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // keep the server's Expires-minus-Date delta, re-anchored at our
    // (corrected) served_date; otherwise take Expires verbatim
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1560
1561 void
1562 StoreEntry::registerAbort(STABH * cb, void *data)
1563 {
1564 assert(mem_obj);
1565 assert(mem_obj->abort.callback == NULL);
1566 mem_obj->abort.callback = cb;
1567 mem_obj->abort.data = cbdataReference(data);
1568 }
1569
1570 void
1571 StoreEntry::unregisterAbort()
1572 {
1573 assert(mem_obj);
1574 if (mem_obj->abort.callback) {
1575 mem_obj->abort.callback = NULL;
1576 cbdataReferenceDone(mem_obj->abort.data);
1577 }
1578 }
1579
/// Dumps every StoreEntry field to the debug log at debug level 'l'.
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1601
/*
 * NOTE, this function assumes only two mem states
 */
/// Transitions the entry between IN_MEMORY and NOT_IN_MEMORY, keeping the
/// memory replacement policy and the hot-object counter in sync.
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // SPECIAL entries are never tracked by the replacement policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1646
/// The entry's store ID (URL) for logging/debugging, or a placeholder
/// when the entry or its memory object is missing.
const char *
StoreEntry::url() const
{
    // NOTE(review): comparing 'this' with NULL is undefined behavior in
    // standard C++, and modern compilers may optimize the check away.
    // Callers should stop invoking url() through a null pointer.
    if (this == NULL)
        return "[null_entry]";
    else if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}
1657
1658 MemObject *
1659 StoreEntry::makeMemObject()
1660 {
1661 if (!mem_obj)
1662 mem_obj = new MemObject();
1663 return mem_obj;
1664 }
1665
/// Creates this entry's MemObject (if needed) and records its store/log
/// URIs and request method.
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    makeMemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1672
/* this just sets DELAY_SENDING */
void
StoreEntry::buffer()
{
    // suspend client-side notifications until flush() is called
    EBIT_SET(flags, DELAY_SENDING);
}
1679
1680 /* this just clears DELAY_SENDING and Invokes the handlers */
1681 void
1682 StoreEntry::flush()
1683 {
1684 if (EBIT_TEST(flags, DELAY_SENDING)) {
1685 EBIT_CLR(flags, DELAY_SENDING);
1686 invokeHandlers();
1687 }
1688 }
1689
/// Total number of stored bytes (reply headers plus body) for this entry.
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1696
/// Stored body length: total object size minus the stored header bytes.
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1704
1705 HttpReply const *
1706 StoreEntry::getReply () const
1707 {
1708 if (NULL == mem_obj)
1709 return NULL;
1710
1711 return mem_obj->getReply();
1712 }
1713
/// Discards accumulated reply data and validation timestamps so the
/// entry's memory object can be refilled from scratch.
void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, "StoreEntry::reset: " << url());
    mem_obj->reset();
    HttpReply *rep = (HttpReply *) getReply(); // bypass const
    rep->reset();
    // forget all freshness/validation timestamps
    expires = lastmod = timestamp = -1;
}
1724
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    // currently only the removal-policy modules register here
    storeReplSetup();
}
1737
/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    // (also rejects duplicate registrations of the same type name)
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    // grow by one slot plus a zeroed terminator entry so the list stays
    // NULL-terminated for the scan above and for createRemovalPolicy()
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
1763
/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    // find the registered policy module matching the configured type name
    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    // an unknown policy name is a fatal misconfiguration
    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return NULL; /* NOTREACHED */
}
1783
#if 0
/* Dead code: legacy single swap-file-number bookkeeping, disabled with
 * #if 0 and never compiled. Kept for reference only. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    // nothing to do when the number is unchanged
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        // releasing: clear the map bit and drop from the LRU
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        // claiming: record the new number, set its map bit, add to LRU
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1804
1805 /*
1806 * Replace a store entry with
1807 * a new reply. This eats the reply.
1808 */
1809 void
1810 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1811 {
1812 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1813
1814 if (!mem_obj) {
1815 debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
1816 return;
1817 }
1818
1819 mem_obj->replaceHttpReply(rep);
1820
1821 if (andStartWriting)
1822 startWriting();
1823 }
1824
/// Packs the (already replaced) reply headers into the empty store entry
/// and marks where the headers end.
void
StoreEntry::startWriting()
{
    Packer p;

    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    packerToStoreInit(&p, this);

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    // pack the headers first, then record the header/body boundary
    rep->packHeadersInto(&p);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(&p);

    packerClean(&p);
}
1849
/// Builds and packs this entry's swap metadata TLV block, recording its
/// size in mem_obj->swap_hdr_sz. Returns the packed buffer (presumably
/// owned/freed by the caller -- confirm against storeSwapMetaPack).
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1861
/// Releases in-memory object data that is no longer needed, except for
/// IN_MEMORY and SPECIAL entries. With preserveSwappable, data still
/// awaiting swap-out is kept.
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
1883
1884 bool
1885 StoreEntry::modifiedSince(HttpRequest * request) const
1886 {
1887 int object_length;
1888 time_t mod_time = lastmod;
1889
1890 if (mod_time < 0)
1891 mod_time = timestamp;
1892
1893 debugs(88, 3, "modifiedSince: '" << url() << "'");
1894
1895 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1896
1897 if (mod_time < 0)
1898 return true;
1899
1900 /* Find size of the object */
1901 object_length = getReply()->content_length;
1902
1903 if (object_length < 0)
1904 object_length = contentLen();
1905
1906 if (mod_time > request->ims) {
1907 debugs(88, 3, "--> YES: entry newer than client");
1908 return true;
1909 } else if (mod_time < request->ims) {
1910 debugs(88, 3, "--> NO: entry older than client");
1911 return false;
1912 } else if (request->imslen < 0) {
1913 debugs(88, 3, "--> NO: same LMT, no client length");
1914 return false;
1915 } else if (request->imslen == object_length) {
1916 debugs(88, 3, "--> NO: same LMT, same length");
1917 return false;
1918 } else {
1919 debugs(88, 3, "--> YES: same LMT, different length");
1920 return true;
1921 }
1922 }
1923
1924 bool
1925 StoreEntry::hasEtag(ETag &etag) const
1926 {
1927 if (const HttpReply *reply = getReply()) {
1928 etag = reply->header.getETag(HDR_ETAG);
1929 if (etag.str)
1930 return true;
1931 }
1932 return false;
1933 }
1934
1935 bool
1936 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1937 {
1938 const String reqETags = request.header.getList(HDR_IF_MATCH);
1939 return hasOneOfEtags(reqETags, false);
1940 }
1941
1942 bool
1943 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1944 {
1945 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
1946 // weak comparison is allowed only for HEAD or full-body GET requests
1947 const bool allowWeakMatch = !request.flags.isRanged &&
1948 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
1949 return hasOneOfEtags(reqETags, allowWeakMatch);
1950 }
1951
/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(HDR_ETAG);
    // a reply without an ETag can only be matched by the "*" wildcard
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // walk the comma-separated request ETag list until something matches
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            // copy the list item so it can be NUL-terminated for parsing
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
1979
/// The SwapDir this entry is stored in; requires a valid swap_dirn.
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
1986
/// Removes this entry's on-disk copy and resets its swap coordinates.
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
1995
1996 /*
1997 * return true if the entry is in a state where
1998 * it can accept more data (ie with write() method)
1999 */
2000 bool
2001 StoreEntry::isAccepting() const
2002 {
2003 if (STORE_PENDING != store_status)
2004 return false;
2005
2006 if (EBIT_TEST(flags, ENTRY_ABORTED))
2007 return false;
2008
2009 return true;
2010 }
2011
/// Compact debugging dump of a StoreEntry: transient/memory/disk indices,
/// non-default status letters, set-flag letters, address, and lock count.
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    // cache backing indices: t=transients, m=shared memory, d=disk
    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}
2059
/* NullStoreEntry */

// the single shared NullStoreEntry object
NullStoreEntry NullStoreEntry::_instance;

/// Accessor for the NullStoreEntry singleton.
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2069
/// A null entry has no cache key; report a placeholder instead.
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2075
/// The singleton must never be deleted; attempting to is a fatal bug.
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2081
/// A null entry has no swap metadata to serialise.
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}