]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Check that StoreEntry lock does not go negative
[thirdparty/squid.git] / src / store.cc
1
2 /*
3 * DEBUG: section 20 Storage Manager
4 * AUTHOR: Harvest Derived
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "CacheDigest.h"
36 #include "CacheManager.h"
37 #include "comm/Connection.h"
38 #include "ETag.h"
39 #include "event.h"
40 #include "fde.h"
41 #include "globals.h"
42 #include "http.h"
43 #include "HttpReply.h"
44 #include "HttpRequest.h"
45 #include "mem_node.h"
46 #include "MemObject.h"
47 #include "mgr/Registration.h"
48 #include "mgr/StoreIoAction.h"
49 #include "profiler/Profiler.h"
50 #include "repl_modules.h"
51 #include "RequestFlags.h"
52 #include "SquidConfig.h"
53 #include "SquidTime.h"
54 #include "Stack.h"
55 #include "StatCounters.h"
56 #include "stmem.h"
57 #include "Store.h"
58 #include "store_digest.h"
59 #include "store_key_md5.h"
60 #include "store_key_md5.h"
61 #include "store_log.h"
62 #include "store_rebuild.h"
63 #include "StoreClient.h"
64 #include "StoreIOState.h"
65 #include "StoreMeta.h"
66 #include "StrList.h"
67 #include "swap_log_op.h"
68 #include "SwapDir.h"
69 #include "tools.h"
70 #if USE_DELAY_POOLS
71 #include "DelayPools.h"
72 #endif
73 #if HAVE_LIMITS_H
74 #include <limits.h>
75 #endif
76
77 #define REBUILD_TIMESTAMP_DELTA_MAX 2
78
79 #define STORE_IN_MEM_BUCKETS (229)
80
/** \todo Convert these string constants to enum string-arrays generated */

// Human-readable labels for StoreEntry status enums, used in debugging
// and cache-manager output. Each array is indexed by the corresponding
// enum value, so the order here must match the enum declaration order.
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
104
/*
 * This defines a removal-policy (replacement) registry entry:
 * a policy name plus the factory function that creates it.
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;         // policy name as configured
    REMOVALPOLICYCREATE *create; // factory for this policy type
};

// table of registered removal policies
static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
// entries whose release had to be postponed; drained by storeLateRelease()
static Stack<StoreEntry*> LateReleaseStack;
// memory pool backing StoreEntry::operator new/delete; created lazily
MemAllocator *StoreEntry::pool = NULL;

// the root of the Store hierarchy; set via Store::Root(aRoot)
StorePointer Store::CurrentRoot = NULL;
132
/// Replace the current root of the store hierarchy.
void
Store::Root(Store * aRoot)
{
    CurrentRoot = aRoot;
}

/// Convenience overload: accept a smart pointer and install its raw pointer.
void
Store::Root(StorePointer aRoot)
{
    Root(aRoot.getRaw());
}
144
/// Append root-store statistics to the given (non-NULL) output entry.
void
Store::Stats(StoreEntry * output)
{
    assert (output);
    Root().stat(*output);
}
151
/// No-op in the base class.
void
Store::create()
{}

/// No-op in the base class.
void
Store::diskFull()
{}

/// No-op in the base class.
void
Store::sync()
{}
163
/// The base Store cannot unlink entries; reaching this is a fatal logic error.
void
Store::unlink (StoreEntry &anEntry)
{
    fatal("Store::unlink on invalid Store\n");
}
169
/// Allocate a StoreEntry from a dedicated memory pool.
void *
StoreEntry::operator new (size_t bytecount)
{
    // only exact-size allocations are supported; a derived class with
    // extra members would silently corrupt the pool
    assert (bytecount == sizeof (StoreEntry));

    // lazily create the shared pool on first allocation
    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
        pool->setChunkSize(2048 * 1024); // 2MB chunks to reduce allocation overhead
    }

    return pool->alloc();
}
182
/// Return a StoreEntry's storage to the pool created by operator new.
void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
188
189 void
190 StoreEntry::makePublic()
191 {
192 /* This object can be cached for a long time */
193
194 if (!EBIT_TEST(flags, RELEASE_REQUEST))
195 setPublicKey();
196 }
197
/// Mark this entry as uncachable: expire it immediately and
/// request its release once no longer in use.
void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest(); /* delete object when not used */
}
205
/// Cache this entry as a negative (error) response: flag it as
/// negatively cached, then publish it like any cachable entry.
void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();
    makePublic();
}
213
214 size_t
215 StoreEntry::inUseCount()
216 {
217 if (!pool)
218 return 0;
219 return pool->getInUseCount();
220 }
221
/// Textual (hex) representation of this entry's MD5 cache key,
/// for logging and debugging.
const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}
227
228 #include "comm.h"
229
230 void
231 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
232 {
233 StoreEntry *anEntry = (StoreEntry *)theContext;
234 anEntry->delayAwareRead(aRead.conn,
235 aRead.buf,
236 aRead.len,
237 aRead.callback);
238 }
239
/// Schedule a read of up to len bytes into buf, honouring read-ahead and
/// (when enabled) delay-pool limits. If no bytes may be read right now,
/// the read is queued on the MemObject and retried later via DeferReader.
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
280
/// How many bytes within aRange we are willing to accept right now.
/// Without a MemObject there is no buffered data, so the whole range
/// is acceptable; otherwise the read-ahead policy and (optionally)
/// delay pools cap the answer.
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    // read-ahead gap full: accept nothing for now
    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
298
/// Whether reading should be deferred: true when we currently want
/// zero bytes. The fd parameter is unused.
bool
StoreEntry::checkDeferRead(int fd) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}
304
/// Forward the no-delay flag to the MemObject, if one exists.
void
StoreEntry::setNoDelay (bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}
311
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decide whether a new store client should be served from memory
/// (STORE_MEM_CLIENT) or via the swap file (STORE_DISK_CLIENT).
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // the beginning of the object is no longer in memory
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
377
/// Construct an empty, unlocked, pending entry with no MemObject and
/// no swap file assignment; timestamps start at -1 (unknown).
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 3, HERE << "new StoreEntry " << this);
}
397
/// Trivial destructor; associated resources are released elsewhere
/// (see destroyMemObject() and destroyStoreEntry()).
StoreEntry::~StoreEntry()
{
}
401
#if USE_ADAPTATION
/// Remember a producer call to be scheduled later via kickProducer().
/// Only the first deferred producer is kept; later requests are logged.
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        // fixed typo in the diagnostic: "allready" -> "already"
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// Schedule the previously deferred producer call (if any) and forget it.
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif
422
/// Detach and destroy this entry's MemObject, disconnecting it from the
/// transients and memory-cache indexes first (when Store::Root() exists).
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        // clear mem_obj before deleting so mem_status/state updates
        // happen while the entry still references a valid object
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete mem;
    }
}
440
/// hash_table free callback: fully dispose of a StoreEntry — disconnect
/// it from its SwapDir, destroy its MemObject, remove it from the hash,
/// and delete it. The shared NullStoreEntry singleton is never deleted.
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
        sd.disconnect(*e);
    }

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have cleared the key
    assert(e->key == NULL);

    delete e;
}
465
466 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
467
/// Give this entry a private copy of someKey and insert it into the
/// global store hash table.
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}
475
/// Remove this entry from the global store hash table and free its key.
/// Safe to call when the entry was never inserted (key is NULL).
void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}
485
/* -------------------------------------------------------------------------- */

/* get rid of memory copy of the object */
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    // without a completed on-disk copy the entry is unusable; release it
    if (swap_status != SWAPOUT_DONE)
        release();
}
502
/// Acquire one reference-count lock on this entry; context names the
/// locker for debugging. Must be balanced with unlock().
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}
509
/// Record a reference: refresh lastref and notify the replacement policy.
void
StoreEntry::touch() {
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
515
/// Set RELEASE_REQUEST (once) and tell all caches/workers to unlink
/// this entry. Idempotent.
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}
528
/// Request that this entry be released once no longer in use:
/// mark it for release and switch it to a private key so future
/// lookups cannot hit it. Idempotent.
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}
539
/// Release one reference-count lock taken by lock(). When the last lock
/// goes away, either release the entry (if requested or still pending)
/// or hand it to the store as an idle entry — which may delete it.
/// Returns the remaining lock count.
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    // guard against unbalanced unlock() calls driving the count negative
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // last lock gone: an incomplete entry cannot be cached
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
567
568 void
569 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
570 {
571 assert (aClient);
572 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
573
574 if (!result)
575 aClient->created (NullStoreEntry::getInstance());
576 else
577 aClient->created (result);
578 }
579
/// Look up the public entry matching the request (any method fallback
/// handled by storeGetPublicByRequest) and notify aClient; a miss
/// yields the NullStoreEntry singleton.
void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
591
/// Look up the public entry for the given URI and method and notify
/// aClient; a miss yields the NullStoreEntry singleton.
void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
603
/// Find the public store entry for a URI+method pair, or NULL.
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}

/// Find the public store entry keyed by request+method, or NULL.
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
}

/// Find the public store entry for a request, falling back from HEAD
/// to GET because a HEAD reply can be generated from a cached GET.
StoreEntry *
storeGetPublicByRequest(HttpRequest * req)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);

    return e;
}
627
/// Monotonic id source for private cache keys. The counter starts at 1
/// and is reset back to 1 if incrementing ever produces a negative
/// value, so callers always receive a positive id.
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;
    if (key_counter < 0)
        key_counter = 1;

    return key_counter;
}
638
/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
/// Re-key this entry under a unique private key so it can no longer be
/// found by public lookups. An already-public entry is first marked for
/// release, logged as deleted in the swap log, and removed from the hash.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    // derive the private key from the entry's URIs when available,
    // otherwise from a placeholder
    if (mem_obj && mem_obj->hasUris()) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    // private keys are unique by construction
    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
677
/// Re-key this entry under its public cache key so future lookups can
/// hit it. Handles Vary bookkeeping (creating or invalidating the
/// "vary marker" base object), evicts any existing entry holding the
/// same public key, and records the addition in the swap log.
void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers); /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    // evict any entry already occupying our public key slot,
    // then recompute the key (e2->setPrivateKey() may reuse the buffer)
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
796
797 StoreEntry *
798 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
799 {
800 StoreEntry *e = NULL;
801 debugs(20, 3, "storeCreateEntry: '" << url << "'");
802
803 e = new StoreEntry();
804 e->makeMemObject();
805 e->mem_obj->setUris(url, log_url, method);
806
807 if (flags.cachable) {
808 EBIT_CLR(e->flags, RELEASE_REQUEST);
809 } else {
810 e->releaseRequest();
811 }
812
813 e->store_status = STORE_PENDING;
814 e->refcount = 0;
815 e->lastref = squid_curtime;
816 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
817 e->ping_status = PING_NONE;
818 EBIT_SET(e->flags, ENTRY_VALIDATED);
819 return e;
820 }
821
/// Create a locked StoreEntry ready for use: a pure entry plus a lock
/// and an initial key (private unless public keys are safe to use).
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
    e->lock("storeCreateEntry");

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    return e;
}
835
/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
843
/// Append writeBuffer's content to this pending entry's MemObject and,
/// unless DELAY_SENDING is set, wake up waiting store clients.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
864
/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    // express the append position as a content offset (headers excluded);
    // write() adds hdr_sz back
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
883
/// printf-style append to a store entry; forwards to storeAppendVPrintf().
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);

    storeAppendVPrintf(e, fmt, args);
    va_end(args);
}
893
/* used by storeAppendPrintf and Packer */
/// Format into a 4KB scratch buffer (output beyond that is truncated
/// by vsnprintf) and append the result to the entry.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    buf[0] = '\0';
    vsnprintf(buf, 4096, fmt, vargs);
    e->append(buf, strlen(buf));
}
903
/// Counters recording why StoreEntry::checkCachable() accepted or
/// rejected entries; reported by storeCheckCachableStats().
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
922
923 int
924 storeTooManyDiskFilesOpen(void)
925 {
926 if (Config.max_open_disk_fds == 0)
927 return 0;
928
929 if (store_open_disk_fd > Config.max_open_disk_fds)
930 return 1;
931
932 return 0;
933 }
934
/// Whether this entry is below the configured minimum object size.
/// ENTRY_SPECIAL objects are exempt. A completed entry is judged by its
/// actual size; otherwise the advertised Content-Length (if any) is used.
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz < 0 ||
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
950
// TODO: remove checks already performed by swapoutPossible()
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decide whether this entry may be cached; returns 1 when cachable.
/// On most rejections the entry is also marked for release; negatively
/// cached entries are rejected without triggering a release.
int
StoreEntry::checkCachable()
{
#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, RELEASE_REQUEST)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
            ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if ((getReply()->content_length > 0 &&
                    getReply()->content_length > store_maxobjsize) ||
                   mem_obj->endOffset() > store_maxobjsize) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    // rejected (and not negatively cached): schedule release
    releaseRequest();
    return 0;
}
1005
/// Cache-manager report: dump the checkCachable() accept/reject counters.
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1036
/// Mark this entry as fully received: record the final object size,
/// switch to STORE_OK, validate the length, and wake up store clients.
/// A non-pending entry must have been aborted and is left alone.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!validLength()) {
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1079
/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the server-side callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    // aborted entries are "complete" as far as status transitions go
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}
1132
/**
 * Clear Memory storage to accommodate the given object len
 *
 * Purges in-memory copies via the memory replacement policy until the
 * needed pages fit under store_pages_max. Rate-limited to one scan per
 * second via last_check.
 */
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    // at most one purge pass per second
    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    // round up to whole pages
    pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    while ((e = walker->Next(walker))) {
        e->purgeMem();
        ++released;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released << " were released");
    PROF_stop(storeGetMemSpace);
}
1180
/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *notused)
{
    /* one maintenance pass over the store hierarchy */
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}
1195
1196 /* The maximum objects to scan for maintain storage space */
1197 #define MAINTAIN_MAX_SCAN 1024
1198 #define MAINTAIN_MAX_REMOVE 64
1199
/*
 * This routine is to be called by main loop in main.c.
 * It removes expired objects on only one bucket for each time called.
 *
 * This should get called 1/s from main().
 */
void
StoreController::maintain()
{
    static time_t last_warn_time = 0;

    PROF_start(storeMaintainSwapSpace);
    /* let each cache_dir enforce its own size limits */
    swapDir->maintain();

    /* this should be emitted by the oversize dir, not globally */

    if (Store::Root().currentSize() > Store::Root().maxSize()) {
        /* warn at most once every 10 seconds to avoid flooding the log */
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }

    PROF_stop(storeMaintainSwapSpace);
}
1227
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    /* drop any local memory cache copy first */
    Store::Root().memoryUnlink(*this);

    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        /* the disk index is still being rebuilt: hide the entry behind a
         * private key and defer the actual on-disk release until the
         * rebuild is finished (see storeLateRelease) */
        setPrivateKey();

        if (swap_filen > -1) {
            // lock the entry until rebuilding is done
            lock("storeLateRelease");
            setReleaseFlag();
            LateReleaseStack.push_back(this);
        } else {
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1277
1278 static void
1279 storeLateRelease(void *unused)
1280 {
1281 StoreEntry *e;
1282 int i;
1283 static int n = 0;
1284
1285 if (StoreController::store_dirs_rebuilding) {
1286 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1287 return;
1288 }
1289
1290 for (i = 0; i < 10; ++i) {
1291 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1292
1293 if (e == NULL) {
1294 /* done! */
1295 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1296 return;
1297 }
1298
1299 e->unlock("storeLateRelease");
1300 ++n;
1301 }
1302
1303 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1304 }
1305
1306 /* return 1 if a store entry is locked */
1307 int
1308 StoreEntry::locked() const
1309 {
1310 if (lock_count)
1311 return 1;
1312
1313 /*
1314 * SPECIAL, PUBLIC entries should be "locked";
1315 * XXX: Their owner should lock them then instead of relying on this hack.
1316 */
1317 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1318 if (!EBIT_TEST(flags, KEY_PRIVATE))
1319 return 1;
1320
1321 return 0;
1322 }
1323
1324 bool
1325 StoreEntry::validLength() const
1326 {
1327 int64_t diff;
1328 const HttpReply *reply;
1329 assert(mem_obj != NULL);
1330 reply = getReply();
1331 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1332 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1333 objectLen());
1334 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1335 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1336
1337 if (reply->content_length < 0) {
1338 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1339 return 1;
1340 }
1341
1342 if (reply->hdr_sz == 0) {
1343 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1344 return 1;
1345 }
1346
1347 if (mem_obj->method == Http::METHOD_HEAD) {
1348 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1349 return 1;
1350 }
1351
1352 if (reply->sline.status() == Http::scNotModified)
1353 return 1;
1354
1355 if (reply->sline.status() == Http::scNoContent)
1356 return 1;
1357
1358 diff = reply->hdr_sz + reply->content_length - objectLen();
1359
1360 if (diff == 0)
1361 return 1;
1362
1363 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1364
1365 return 0;
1366 }
1367
/* register the storage manager's cachemgr report pages */
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1376
/* one-time startup initialization of the storage manager; the order of
 * these calls matters (e.g. the store must exist before the rebuild) */
void
storeInit(void)
{
    storeKeyInit();
    /* memory replacement policy from the memory_replacement_policy directive */
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    /* schedule cleanup of entries deferred during the index rebuild */
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1390
1391 void
1392 storeConfigure(void)
1393 {
1394 store_swap_high = (long) (((float) Store::Root().maxSize() *
1395 (float) Config.Swap.highWaterMark) / (float) 100);
1396 store_swap_low = (long) (((float) Store::Root().maxSize() *
1397 (float) Config.Swap.lowWaterMark) / (float) 100);
1398 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1399 }
1400
1401 bool
1402 StoreEntry::memoryCachable() const
1403 {
1404 if (mem_obj == NULL)
1405 return 0;
1406
1407 if (mem_obj->data_hdr.size() == 0)
1408 return 0;
1409
1410 if (mem_obj->inmem_lo != 0)
1411 return 0;
1412
1413 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1414 return 0;
1415
1416 return 1;
1417 }
1418
1419 int
1420 StoreEntry::checkNegativeHit() const
1421 {
1422 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1423 return 0;
1424
1425 if (expires <= squid_curtime)
1426 return 0;
1427
1428 if (store_status != STORE_OK)
1429 return 0;
1430
1431 return 1;
1432 }
1433
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    /* note: the #if below selects which single statement forms the if body */
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1453
/* shutdown helper: drop the root store and destroy the cache digest */
void
storeFreeMemory(void)
{
    /* replacing the root pointer with NULL releases the store hierarchy */
    Store::Root(NULL);
#if USE_CACHE_DIGESTS

    if (store_digest)
        cacheDigestDestroy(store_digest);

#endif

    store_digest = NULL;
}
1467
1468 int
1469 expiresMoreThan(time_t expires, time_t when)
1470 {
1471 if (expires < 0) /* No Expires given */
1472 return 1;
1473
1474 return (expires > (squid_curtime + when));
1475 }
1476
/* whether this entry may be used to satisfy a client request */
int
StoreEntry::validToSend() const
{
    /* entries queued for deletion must not be served */
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    /* a negatively cached reply is usable only until it expires */
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing so we should not rely on the store cache at all. This
    // is wrong for range requests that could feed off nibbled memory (XXX).
    if (mem_obj->inmem_lo) // in local memory cache, but got nibbled at
        return 0;

    return 1;
}
1511
/*
 * Derive this entry's timestamp, expires and lastmod values from the
 * stored reply, compensating for server clock skew, the Age header,
 * and request transit time (mimics RFC 2616 section 13.2.3).
 */
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    /* relative expiry: keep the server's Expires-Date delta, applied to
     * our (possibly corrected) served_date */
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1559
/*
 * Register a callback to run if this entry is aborted.
 * Only one abort handler may be registered at a time;
 * the data pointer is protected with a cbdata reference.
 */
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}
1568
1569 void
1570 StoreEntry::unregisterAbort()
1571 {
1572 assert(mem_obj);
1573 if (mem_obj->abort.callback) {
1574 mem_obj->abort.callback = NULL;
1575 cbdataReferenceDone(mem_obj->abort.data);
1576 }
1577 }
1578
/* dump all of this entry's metadata fields to the debug log at level l */
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1600
/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        /* SPECIAL entries are pinned and never subject to replacement */
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        /* counterpart of the increment above */
        --hot_obj_count;
    }

    mem_status = new_status;
}
1645
1646 const char *
1647 StoreEntry::url() const
1648 {
1649 if (this == NULL)
1650 return "[null_entry]";
1651 else if (mem_obj == NULL)
1652 return "[null_mem_obj]";
1653 else
1654 return mem_obj->storeId();
1655 }
1656
1657 MemObject *
1658 StoreEntry::makeMemObject()
1659 {
1660 if (!mem_obj)
1661 mem_obj = new MemObject();
1662 return mem_obj;
1663 }
1664
/* ensure a MemObject exists and set its store/log URIs and method */
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    makeMemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1671
/* this just sets DELAY_SENDING; data appended while set is buffered
 * until flush() clears the flag */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1678
1679 /* this just clears DELAY_SENDING and Invokes the handlers */
1680 void
1681 StoreEntry::flush()
1682 {
1683 if (EBIT_TEST(flags, DELAY_SENDING)) {
1684 EBIT_CLR(flags, DELAY_SENDING);
1685 invokeHandlers();
1686 }
1687 }
1688
/* total stored size of the object, headers plus body, in bytes */
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1695
/* stored body size in bytes: total object size minus reply header size */
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1703
1704 HttpReply const *
1705 StoreEntry::getReply () const
1706 {
1707 if (NULL == mem_obj)
1708 return NULL;
1709
1710 return mem_obj->getReply();
1711 }
1712
1713 void
1714 StoreEntry::reset()
1715 {
1716 assert (mem_obj);
1717 debugs(20, 3, "StoreEntry::reset: " << url());
1718 mem_obj->reset();
1719 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1720 rep->reset();
1721 expires = lastmod = timestamp = -1;
1722 }
1723
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}
1736
/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type: grow the array by one entry plus room for the
     * NULL (all-zero) terminator slot */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    /* zero the terminator so the scan loop above stops correctly */
    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
1762
1763 /*
1764 * Create a removal policy instance
1765 */
1766 RemovalPolicy *
1767 createRemovalPolicy(RemovalPolicySettings * settings)
1768 {
1769 storerepl_entry_t *r;
1770
1771 for (r = storerepl_list; r && r->typestr; ++r) {
1772 if (strcmp(r->typestr, settings->type) == 0)
1773 return r->create(settings->args);
1774 }
1775
1776 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1777 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1778 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1779 fatalf("ERROR: Unknown policy %s\n", settings->type);
1780 return NULL; /* NOTREACHED */
1781 }
1782
#if 0
/* Dead code: preprocessed out.  Kept only as a historical reference for
 * how swap file numbers used to be (re)assigned; remove when no longer
 * useful. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1803
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    /* without a MemObject there is nowhere to keep the reply */
    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep);

    /* optionally serialise the new headers into the store right away */
    if (andStartWriting)
        startWriting();
}
1823
/* Serialise the (already replaced) reply headers, and any reply body
 * prefix, into this still-empty entry, marking where the headers end. */
void
StoreEntry::startWriting()
{
    Packer p;

    /* TODO: when we store headers serparately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    packerToStoreInit(&p, this);

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    rep->packHeadersInto(&p);
    /* record the header/body boundary for later hdr_sz accounting */
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(&p);

    packerClean(&p);
}
1848
/* Build and pack this entry's swap metadata TLV list, recording the
 * packed size in mem_obj->swap_hdr_sz.  Returns the packed buffer;
 * presumably the caller takes ownership and must free it -- confirm
 * against storeSwapMetaPack(). */
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    /* the TLV list is no longer needed once packed */
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1860
/* Discard in-memory object data that is no longer needed; with
 * preserveSwappable set, keep the portion still awaiting swap-out.
 * NOTE(review): mem_obj is dereferenced unconditionally below --
 * callers apparently guarantee it is non-nil; confirm. */
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
1882
/* Decide whether this entry counts as modified for an
 * If-Modified-Since request (compares Last-Modified -- or, failing
 * that, our timestamp -- against request->ims, with an optional
 * length tie-break via request->imslen). */
bool
StoreEntry::modifiedSince(HttpRequest * request) const
{
    int object_length;
    time_t mod_time = lastmod;

    /* no Last-Modified: fall back to the entry timestamp */
    if (mod_time < 0)
        mod_time = timestamp;

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    /* no usable time at all: treat as modified */
    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > request->ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < request->ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (request->imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (request->imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}
1922
1923 bool
1924 StoreEntry::hasEtag(ETag &etag) const
1925 {
1926 if (const HttpReply *reply = getReply()) {
1927 etag = reply->header.getETag(HDR_ETAG);
1928 if (etag.str)
1929 return true;
1930 }
1931 return false;
1932 }
1933
/* whether the request's If-Match list matches the stored entity ETag
 * (If-Match always requires strong comparison) */
bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(HDR_IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}
1940
/* whether the request's If-None-Match list matches the stored entity ETag */
bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}
1950
/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(HDR_ETAG);
    /* the reply carries no ETag: only a wildcard request can match */
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    /* walk the comma-separated request list until a match is found */
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                /* weak comparison only when the caller permits it */
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
1978
/* the SwapDir holding this entry; the entry must have a valid swap_dirn */
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
1985
/* remove this entry's object from its cache_dir and forget the swap
 * location */
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
1994
1995 /*
1996 * return true if the entry is in a state where
1997 * it can accept more data (ie with write() method)
1998 */
1999 bool
2000 StoreEntry::isAccepting() const
2001 {
2002 if (STORE_PENDING != store_status)
2003 return false;
2004
2005 if (EBIT_TEST(flags, ENTRY_ABORTED))
2006 return false;
2007
2008 return true;
2009 }
2010
/* Compact one-line debug rendering of a StoreEntry: cache locations,
 * non-default status values, set flag letters, then address and lock
 * count.  Each letter is unique so traces stay unambiguous. */
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    /* transient (t), shared-memory (m), and disk (d filen@dirn) locations */
    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    /* collapsed-forwarding marker */
    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}
2058
2059 /* NullStoreEntry */
2060
2061 NullStoreEntry NullStoreEntry::_instance;
2062
/* the shared singleton null-object entry */
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2068
/* null object has no real key to print */
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2074
/* the singleton must never be deleted; deleting it is a logic error */
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2080
/* null object carries no swap metadata */
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}
2086
2087 #if !_USE_INLINE_
2088 #include "Store.cci"
2089 #endif