]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / store.cc
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 20 Storage Manager
6 * AUTHOR: Harvest Derived
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "CacheManager.h"
38 #include "comm/Connection.h"
39 #include "ETag.h"
40 #include "event.h"
41 #include "fde.h"
42 #include "Store.h"
43 #include "mgr/Registration.h"
44 #include "StoreClient.h"
45 #include "stmem.h"
46 #include "HttpReply.h"
47 #include "HttpRequest.h"
48 #include "MemObject.h"
49 #include "mem_node.h"
50 #include "StoreMeta.h"
51 #include "SwapDir.h"
52 #include "StoreIOState.h"
53 #if USE_DELAY_POOLS
54 #include "DelayPools.h"
55 #endif
56 #include "Stack.h"
57 #include "SquidTime.h"
58 #include "swap_log_op.h"
59 #include "mgr/StoreIoAction.h"
60
61 static STMCB storeWriteComplete;
62
63 #define REBUILD_TIMESTAMP_DELTA_MAX 2
64
65 #define STORE_IN_MEM_BUCKETS (229)
66
67
/** \todo Convert these string constants to enum string-arrays generated */

/// Human-readable names for mem_status_t; indexed by the enum value
/// (used by cache manager reports and debugging output).
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

/// Human-readable names for ping_status_t (ICP/HTCP peer-ping state).
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

/// Human-readable names for store_status_t (object completeness).
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

/// Human-readable names for swap_status_t (disk swap-out progress).
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE"
};
91
92
/*
 * This defines an repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

/// Registry record for one removal (replacement) policy implementation:
/// its configuration name and the factory that creates policy instances.
struct _storerepl_entry {
    const char *typestr;          ///< policy name as used in squid.conf
    REMOVALPOLICYCREATE *create;  ///< factory for RemovalPolicy objects
};

/// NULL-terminated array of registered replacement policies; grown by
/// storeReplAdd() (defined elsewhere in this file or module).
static storerepl_entry_t *storerepl_list = NULL;
105
106
107 /*
108 * local function prototypes
109 */
110 static int getKeyCounter(void);
111 static OBJH storeCheckCachableStats;
112 static EVH storeLateRelease;
113
114 /*
115 * local variables
116 */
117 static Stack<StoreEntry*> LateReleaseStack;
118 MemAllocator *StoreEntry::pool = NULL;
119
120 StorePointer Store::CurrentRoot = NULL;
121
/// Install aRoot as the global root store object.
/// Callers access it later via Store::Root().
void
Store::Root(Store * aRoot)
{
    CurrentRoot = aRoot;
}

/// Convenience overload: unwraps the refcounting pointer and delegates
/// to the raw-pointer setter above.
void
Store::Root(StorePointer aRoot)
{
    Root(aRoot.getRaw());
}
133
134 void
135 Store::Stats(StoreEntry * output)
136 {
137 assert (output);
138 Root().stat(*output);
139 }
140
/// Default no-op: concrete stores override to build their storage.
void
Store::create()
{}

/// Default no-op: concrete stores override to react to a full disk.
void
Store::diskFull()
{}

/// Default no-op: concrete stores override to flush pending state.
void
Store::sync()
{}

/// Base class cannot unlink anything; reaching this is a logic error
/// and aborts the process via fatal().
void
Store::unlink (StoreEntry &anEntry)
{
    fatal("Store::unlink on invalid Store\n");
}
158
/// Allocate a StoreEntry from a dedicated memory pool.
/// The pool is created lazily on first use and is never destroyed.
void *
StoreEntry::operator new (size_t bytecount)
{
    assert (bytecount == sizeof (StoreEntry));

    if (!pool) {
        // 2MB chunks: entries are numerous, so allocate in large slabs
        pool = memPoolCreate ("StoreEntry", bytecount);
        pool->setChunkSize(2048 * 1024);
    }

    return pool->alloc();
}

/// Return a StoreEntry's memory to the pool created by operator new.
void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
177
/// Give this entry a public key so future requests can hit it,
/// but only if it is still marked cachable.
void
StoreEntry::makePublic()
{
    /* This object can be cached for a long time */

    if (EBIT_TEST(flags, ENTRY_CACHABLE))
        setPublicKey();
}

/// Prevent this entry from ever being served from cache:
/// expire it immediately and schedule its release.
void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest();           /* delete object when not used */
    /* releaseRequest clears ENTRY_CACHABLE flag */
}

/// Mark the entry negatively cached (e.g. an error reply kept briefly),
/// then publish it if still cachable.
void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();

    if (EBIT_TEST(flags, ENTRY_CACHABLE))
        setPublicKey();
}
205
/// Number of StoreEntry objects currently allocated from the pool;
/// zero when the pool was never created.
size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

/// Render this entry's MD5 cache key as printable text
/// (returns a pointer to a static buffer owned by storeKeyText).
const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}
219
220 #include "comm.h"
221
222 void
223 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
224 {
225 StoreEntry *anEntry = (StoreEntry *)theContext;
226 anEntry->delayAwareRead(aRead.conn,
227 aRead.buf,
228 aRead.len,
229 aRead.callback);
230 }
231
232 void
233 StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
234 {
235 size_t amountToRead = bytesWanted(Range<size_t>(0, len));
236 /* sketch: readdeferer* = getdeferer.
237 * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
238 */
239
240 if (amountToRead == 0) {
241 assert (mem_obj);
242 /* read ahead limit */
243 /* Perhaps these two calls should both live in MemObject */
244 #if USE_DELAY_POOLS
245 if (!mem_obj->readAheadPolicyCanRead()) {
246 #endif
247 mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
248 return;
249 #if USE_DELAY_POOLS
250 }
251
252 /* delay id limit */
253 mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
254 return;
255
256 #endif
257
258 }
259
260 if (fd_table[conn->fd].closing()) {
261 // Readers must have closing callbacks if they want to be notified. No
262 // readers appeared to care around 2009/12/14 as they skipped reading
263 // for other reasons. Closing may already be true at the delyaAwareRead
264 // call time or may happen while we wait after delayRead() above.
265 debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
266 callback);
267 return; // the read callback will never be called
268 }
269
270 comm_read(conn, buf, amountToRead, callback);
271 }
272
/// How many bytes this entry is willing to accept from the given range.
/// Returns aRange.end - 1 (i.e. "as much as offered") when there is no
/// MemObject yet or while still waiting for forwarded headers; returns 0
/// when the read-ahead policy forbids reading for now.
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange) const
{
    assert (aRange.size());

    if (mem_obj == NULL)
        return aRange.end - 1;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    /* Always read *something* here - we haven't got the header yet */
    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT))
        return aRange.end - 1;

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end - 1);
}

/// True when reading should be deferred because bytesWanted() reports
/// that no bytes are currently acceptable. The fd parameter is unused.
bool
StoreEntry::checkDeferRead(int fd) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}

/// Propagate the "no delay" (bypass delay pools) flag to the MemObject,
/// if one exists.
void
StoreEntry::setNoDelay (bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}
309
/// Decide whether a new store client should be served from memory
/// (STORE_MEM_CLIENT) or from disk (STORE_DISK_CLIENT).
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    if (mem_status == IN_MEMORY && UsingSmp()) {
        // clients of an object cached in shared memory are memory clients
        return STORE_MEM_CLIENT;
    }

    assert(mem_obj);

    // if the beginning of the object is no longer in memory,
    // the client has to read from disk
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, 1, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
375
/// Construct an entry with no MemObject (e.g. for disk-rebuild or
/// index-only use). Timestamps start unset (-1) and no swap file
/// is associated yet.
StoreEntry::StoreEntry():
        hidden_mem_obj(NULL),
        swap_file_sz(0)
{
    debugs(20, 3, HERE << "new StoreEntry " << this);
    mem_obj = NULL;

    expires = lastmod = lastref = timestamp = -1;

    swap_status = SWAPOUT_NONE;
    swap_filen = -1;
    swap_dirn = -1;
}

/// Construct an entry with a fresh MemObject for the given URLs.
/// Same initial state as the default constructor otherwise.
StoreEntry::StoreEntry(const char *aUrl, const char *aLogUrl):
        hidden_mem_obj(NULL),
        swap_file_sz(0)
{
    debugs(20, 3, HERE << "new StoreEntry " << this);
    mem_obj = new MemObject(aUrl, aLogUrl);

    expires = lastmod = lastref = timestamp = -1;

    swap_status = SWAPOUT_NONE;
    swap_filen = -1;
    swap_dirn = -1;
}
403
/// Destructor: detach from the swap dir if a swap file is still
/// associated, and free any hidden MemObject.
StoreEntry::~StoreEntry()
{
    if (swap_filen >= 0) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*store());
        sd.disconnect(*this);
    }
    delete hidden_mem_obj;
}

/// Free this entry's in-memory object data (both visible and hidden
/// MemObjects) and mark the entry NOT_IN_MEMORY.
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);
    // mem_obj is nulled before delete so the status change and
    // deletion cannot observe a half-destroyed object
    setMemStatus(NOT_IN_MEMORY);
    MemObject *mem = mem_obj;
    mem_obj = NULL;
    delete mem;
    delete hidden_mem_obj;
    hidden_mem_obj = NULL;
}

/// Stash the current MemObject aside (in hidden_mem_obj) and detach it
/// from the entry. At most one MemObject may be hidden at a time.
void
StoreEntry::hideMemObject()
{
    debugs(20, 3, HERE << "hiding " << mem_obj);
    assert(mem_obj);
    assert(!hidden_mem_obj);
    hidden_mem_obj = mem_obj;
    mem_obj = NULL;
}
434
/// Hash-table destructor callback: fully tear down a StoreEntry that the
/// store index no longer references. The NullStoreEntry singleton is
/// never destroyed.
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    // data arrives as hash_link*; the double cast recovers the entry
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have freed and cleared the key
    assert(e->key == NULL);

    delete e;
}
453
454 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
455
/// Insert this entry into the global store index under a copy of someKey.
/// The entry takes ownership of the duplicated key.
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

/// Remove this entry from the global store index and free its key.
void
StoreEntry::hashDelete()
{
    hash_remove_link(store_table, this);
    storeKeyFree((const cache_key *)key);
    key = NULL;
}
471
472 /* -------------------------------------------------------------------------- */
473
474
/* get rid of memory copy of the object */
/// Drop the in-memory copy of this object. If the object has not been
/// fully swapped to disk, it cannot be served anymore and is released.
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    destroyMemObject();

    // without a complete disk copy the object is now unusable
    if (swap_status != SWAPOUT_DONE)
        release();
}
489
490 /* RBC 20050104 this is wrong- memory ref counting
491 * is not at all equivalent to the store 'usage' concept
492 * which the replacement policies should be acting upon.
493 * specifically, object iteration within stores needs
494 * memory ref counting to prevent race conditions,
495 * but this should not influence store replacement.
496 */
497 void
498
499 StoreEntry::lock()
500 {
501 lock_count++;
502 debugs(20, 3, "StoreEntry::lock: key '" << getMD5Text() <<"' count=" <<
503 lock_count );
504 lastref = squid_curtime;
505 Store::Root().reference(*this);
506 }
507
/// Set RELEASE_REQUEST on this entry (idempotent); the entry will be
/// destroyed once no longer in use.
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);
}

/// Request eventual release of this entry: mark it, clear cachability,
/// and switch it to a private key so no new clients can find it.
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag();

    /*
     * Clear cachable flag here because we might get called before
     * anyone else even looks at the cachability flag.  Also, this
     * prevents httpMakePublic from really setting a public key.
     */
    EBIT_CLR(flags, ENTRY_CACHABLE);

    setPrivateKey();
}
536
/* unlock object, return -1 if object get released after unlock
 * otherwise lock_count */
/// Drop one reference. When the count hits zero the entry is either
/// released (if requested or incomplete) or handed to the root store's
/// idle-entry handling, which may delete it.
int
StoreEntry::unlock()
{
    lock_count--;
    debugs(20, 3, "StoreEntry::unlock: key '" << getMD5Text() << "' count=" << lock_count);

    if (lock_count)
        return (int) lock_count;

    // an unreferenced, still-pending entry can never complete: release it
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, 1, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
564
565 void
566 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
567 {
568 assert (aClient);
569 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
570
571 if (!result)
572 aClient->created (NullStoreEntry::getInstance());
573 else
574 aClient->created (result);
575 }
576
/// Look up the public entry for the request (any suitable method) and
/// deliver it to the client; misses become the NullStoreEntry singleton.
void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}

/// Look up the public entry for (uri, method) and deliver it to the
/// client; misses become the NullStoreEntry singleton.
void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
600
/// Find the public cache entry keyed on (uri, method); NULL on miss.
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}

/// Find the public cache entry keyed on the request plus an explicit
/// method; NULL on miss.
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
}

/// Find the public cache entry for the request. For HEAD requests a
/// cached GET object is an acceptable substitute, so a HEAD miss falls
/// back to a GET lookup.
StoreEntry *
storeGetPublicByRequest(HttpRequest * req)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);

    if (e == NULL && req->method == METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, METHOD_GET);

    return e;
}
624
/// Monotonically increasing counter used to make private cache keys
/// unique. Restarts at 1 if the signed counter ever wraps negative,
/// so the returned value is always positive.
static int
getKeyCounter(void)
{
    static int counter = 0;

    ++counter;

    if (counter < 0)
        counter = 1;

    return counter;
}
635
636 /* RBC 20050104 AFAICT this should become simpler:
637 * rather than reinserting with a special key it should be marked
638 * as 'released' and then cleaned up when refcounting indicates.
639 * the StoreHashIndex could well implement its 'released' in the
640 * current manner.
641 * Also, clean log writing should skip over ia,t
642 * Otherwise, we need a 'remove from the index but not the store
643 * concept'.
644 */
/// Re-key this entry with a unique private key so no other client can
/// find it in the index. No-op if the key is already private. Any
/// existing public key (and its swap log record) is removed first.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already private */

    if (key) {
        // entry had a (public) key: log the removal for the swap log
        // if it has a disk file, then drop it from the index
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj != NULL) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->url, mem_obj->method, mem_obj->id);
    } else {
        // no MemObject to derive a key from: use a placeholder URL
        newkey = storeKeyPrivate("JUNK", METHOD_NONE, getKeyCounter());
    }

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
671
/// Re-key this entry with its public key so future requests can hit it.
/// Handles Vary bookkeeping: keeps the request's variance mark in sync
/// with the reply, and creates/refreshes the internal "vary marker"
/// base object when this response varies. Any pre-existing entry under
/// the same public key is made private and released.
void
StoreEntry::setPublicKey()
{
    StoreEntry *e2 = NULL;
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public.  Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object.  If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, then ENTRY_CACHABLE should not
     * be set, and StoreEntry::setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, 1, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers);       /* free old "bad" variance key */
                StoreEntry *pe = storeGetPublic(mem_obj->url, mem_obj->method);

                if (pe)
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->url, mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->url, mem_obj->log_url, request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            // marker object expires well in the future (~28 hours)
            rep->setHeaders(HTTP_OK, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.defined()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep);

            pe->timestampsSet();

            pe->makePublic();

            pe->complete();

            pe->unlock();
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->url, mem_obj->method);

    if ((e2 = (StoreEntry *) hash_lookup(store_table, newkey))) {
        // evict the previous holder of this public key, then recompute
        // the key (the eviction may have invalidated the static buffer)
        debugs(20, 3, "StoreEntry::setPublicKey: Making old '" << mem_obj->url << "' private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->url, mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
792
/// Create a new, locked, STORE_PENDING entry for the given URL/method.
/// The caller owns one lock (set directly, without storeLock()).
/// Keying: private when peers do private keys or the request is not
/// hierarchical; public otherwise.
StoreEntry *
storeCreateEntry(const char *url, const char *log_url, request_flags flags, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    MemObject *mem = NULL;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry(url, log_url);
    e->lock_count = 1;          /* Note lock here w/o calling storeLock() */
    mem = e->mem_obj;
    mem->method = method;

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    if (flags.cachable) {
        EBIT_SET(e->flags, ENTRY_CACHABLE);
        EBIT_CLR(e->flags, RELEASE_REQUEST);
    } else {
        /* StoreEntry::releaseRequest() clears ENTRY_CACHABLE */
        e->releaseRequest();
    }

    e->store_status = STORE_PENDING;
    e->setMemStatus(NOT_IN_MEMORY);
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1;          /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}
827
/* Mark object as expired */
/// Force this entry to expire immediately (expires = now).
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

/// STMCB callback invoked when MemObject::write() finishes appending
/// data: wakes up waiting store clients unless delivery is being
/// deliberately delayed (DELAY_SENDING).
void
storeWriteComplete (void *data, StoreIOBuffer wroteBuffer)
{
    PROF_start(storeWriteComplete);
    StoreEntry *e = (StoreEntry *)data;

    if (EBIT_TEST(e->flags, DELAY_SENDING)) {
        PROF_stop(storeWriteComplete);
        return;
    }

    e->invokeHandlers();
    PROF_stop(storeWriteComplete);
}
850
/// Write a buffer of response data into this (still pending) entry's
/// MemObject. A zero-length write is treated as an explicit flush to
/// clients (needed for servers that send headers long before any body).
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    if (!writeBuffer.length) {
        /* the headers are received already, but we have not received
         * any body data. There are BROKEN abuses of HTTP which require
         * the headers to be passed along before any body data - see
         * http://developer.apple.com/documentation/QuickTime/QTSS/Concepts/chapter_2_section_14.html
         * for an example of such bad behaviour. To accomodate this, if
         * we have a empty write arrive, we flush to our clients.
         * -RBC 20060903
         */
        PROF_stop(StoreEntry_write);
        invokeHandlers();
        return;
    }

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    // make room in memory before buffering the new data
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write (writeBuffer, storeWriteComplete, this);
}

/* Append incoming data from a primary server to an entry. */
/// Convenience wrapper around write(): builds a StoreIOBuffer for the
/// raw (buf, len) pair at the current end-of-object offset.
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later.  This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
897
898
/// printf-style append to a store entry; formats into a temporary
/// buffer via storeAppendVPrintf().
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);

    storeAppendVPrintf(e, fmt, args);
    va_end(args);
}

/* used be storeAppendPrintf and Packer */
/// vprintf-style append to a store entry.
/// NOTE: output longer than 4095 characters is silently truncated by
/// vsnprintf before being appended.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    buf[0] = '\0';
    vsnprintf(buf, 4096, fmt, vargs);
    e->append(buf, strlen(buf));
}
918
/// Counters recording why StoreEntry::checkCachable() accepted or
/// rejected entries; reported by storeCheckCachableStats().
struct _store_check_cachable_hist {

    struct {
        int non_get;               ///< rejected: non-GET method (CACHE_ALL_METHODS builds)
        int not_entry_cachable;    ///< rejected: ENTRY_CACHABLE flag not set
        int wrong_content_length;  ///< rejected: ENTRY_BAD_LENGTH after completion
        int negative_cached;       ///< rejected: entry is negatively cached
        int too_big;               ///< rejected: exceeds maximum_object_size
        int too_small;             ///< rejected: below minimum_object_size
        int private_key;           ///< rejected: entry still has a private key
        int too_many_open_files;   ///< rejected: disk FD limit reached
        int too_many_open_fds;     ///< rejected: global FD reserve exhausted
    } no;

    struct {
        int Default;               ///< accepted
    } yes;
} store_check_cachable_hist;
937
938 int
939 storeTooManyDiskFilesOpen(void)
940 {
941 if (Config.max_open_disk_fds == 0)
942 return 0;
943
944 if (store_open_disk_fd > Config.max_open_disk_fds)
945 return 1;
946
947 return 0;
948 }
949
/// Whether this entry is below the configured minimum object size.
/// Special (internal) entries are never "too small". Uses the actual
/// object size when complete, the advertised Content-Length otherwise.
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    // NOTE: the nested ifs below intentionally share one return;
    // beware when editing that neither outer if has braces
    if (STORE_OK == store_status)
        if (mem_obj->object_sz < 0 ||
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
965
966 // TODO: remove checks already performed by swapoutPossible()
967 // TODO: move "too many open..." checks outside -- we are called too early/late
968 int
969 StoreEntry::checkCachable()
970 {
971 #if CACHE_ALL_METHODS
972
973 if (mem_obj->method != METHOD_GET) {
974 debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
975 store_check_cachable_hist.no.non_get++;
976 } else
977 #endif
978 if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
979 debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
980 store_check_cachable_hist.no.wrong_content_length++;
981 } else if (!EBIT_TEST(flags, ENTRY_CACHABLE)) {
982 debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
983 store_check_cachable_hist.no.not_entry_cachable++;
984 } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
985 debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
986 store_check_cachable_hist.no.negative_cached++;
987 return 0; /* avoid release call below */
988 } else if ((getReply()->content_length > 0 &&
989 getReply()->content_length
990 > Config.Store.maxObjectSize) ||
991 mem_obj->endOffset() > Config.Store.maxObjectSize) {
992 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
993 store_check_cachable_hist.no.too_big++;
994 } else if (getReply()->content_length > Config.Store.maxObjectSize) {
995 debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
996 store_check_cachable_hist.no.too_big++;
997 } else if (checkTooSmall()) {
998 debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
999 store_check_cachable_hist.no.too_small++;
1000 } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
1001 debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
1002 store_check_cachable_hist.no.private_key++;
1003 } else if (swap_status != SWAPOUT_NONE) {
1004 /*
1005 * here we checked the swap_status because the remaining
1006 * cases are only relevant only if we haven't started swapping
1007 * out the object yet.
1008 */
1009 return 1;
1010 } else if (storeTooManyDiskFilesOpen()) {
1011 debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
1012 store_check_cachable_hist.no.too_many_open_files++;
1013 } else if (fdNFree() < RESERVED_FD) {
1014 debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
1015 store_check_cachable_hist.no.too_many_open_fds++;
1016 } else {
1017 store_check_cachable_hist.yes.Default++;
1018 return 1;
1019 }
1020
1021 releaseRequest();
1022 /* StoreEntry::releaseRequest() cleared ENTRY_CACHABLE */
1023 return 0;
1024 }
1025
/// Cache manager report: dump the checkCachable() accept/reject
/// counters into sentry as a two-column table.
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1056
/// Mark reception of this object as finished: record the final object
/// size, flip store_status to STORE_OK, validate the received length,
/// and wake up waiting clients.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!validLength()) {
        // received size disagrees with Content-Length: do not cache
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut.  However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1099
/*
 * Someone wants to abort this transfer.  Set the reason in the
 * request structure, call the server-side callback and mark the
 * entry for releasing
 */
/// Abort this pending transfer: negatively cache and release the entry,
/// notify the server side via the registered abort callback, wake the
/// client side, and invalidate any partial swap-out.
void
StoreEntry::abort()
{
    statCounter.aborted_requests++;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock();                     /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    // reception is over (albeit unsuccessfully)
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20,1,HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock();                   /* unlock */
}
1152
/**
 * Clear Memory storage to accommodate the given object len
 */
/// Free memory pages for an incoming object of `size` bytes by purging
/// in-memory copies of other entries chosen by the memory replacement
/// policy. Rate-limited to at most one sweep per second.
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    StoreEntry *e = NULL;
    int released = 0;
    static time_t last_check = 0;
    size_t pages_needed;
    RemovalPurgeWalker *walker;

    // run at most once per second
    if (squid_curtime == last_check) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    last_check = squid_curtime;

    pages_needed = (size / SM_PAGE_SIZE) + 1;

    // fast path: enough free pages already
    if (mem_node::InUseCount() + pages_needed < store_pages_max) {
        PROF_stop(storeGetMemSpace);
        return;
    }

    debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
           " pages");

    /* XXX what to set as max_scan here? */
    walker = mem_policy->PurgeInit(mem_policy, 100000);

    // purge policy victims until enough pages are free
    while ((e = walker->Next(walker))) {
        e->purgeMem();
        released++;

        if (mem_node::InUseCount() + pages_needed < store_pages_max)
            break;
    }

    walker->Done(walker);
    debugs(20, 3, "storeGetMemSpace stats:");
    debugs(20, 3, "  " << std::setw(6) << hot_obj_count << " HOT objects");
    debugs(20, 3, "  " << std::setw(6) << released << " were released");
    PROF_stop(storeGetMemSpace);
}
1200
1201
1202 /* thunk through to Store::Root().maintain(). Note that this would be better still
1203 * if registered against the root store itself, but that requires more complex
1204 * update logic - bigger fish to fry first. Long term each store when
1205 * it becomes active will self register
1206 */
1207 void
1208 Store::Maintain(void *notused)
1209 {
1210 Store::Root().maintain();
1211
1212 /* Reregister a maintain event .. */
1213 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1214
1215 }
1216
1217 /* The maximum objects to scan for maintain storage space */
1218 #define MAINTAIN_MAX_SCAN 1024
1219 #define MAINTAIN_MAX_REMOVE 64
1220
1221 /*
1222 * This routine is to be called by main loop in main.c.
1223 * It removes expired objects on only one bucket for each time called.
1224 *
1225 * This should get called 1/s from main().
1226 */
1227 void
1228 StoreController::maintain()
1229 {
1230 static time_t last_warn_time = 0;
1231
1232 PROF_start(storeMaintainSwapSpace);
1233 swapDir->maintain();
1234
1235 /* this should be emitted by the oversize dir, not globally */
1236
1237 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1238 if (squid_curtime - last_warn_time > 10) {
1239 debugs(20, 0, "WARNING: Disk space over limit: "
1240 << Store::Root().currentSize() / 1024.0 << " KB > "
1241 << (Store::Root().maxSize() >> 10) << " KB");
1242 last_warn_time = squid_curtime;
1243 }
1244 }
1245
1246 PROF_stop(storeMaintainSwapSpace);
1247 }
1248
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "storeRelease: Releasing: '" << getMD5Text() << "'");
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // while cache_dirs are rebuilding, on-disk entries cannot be erased yet;
    // park them on LateReleaseStack for storeLateRelease() to finish later
    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        setPrivateKey();

        if (mem_obj)
            destroyMemObject();

        if (swap_filen > -1) {
            /*
             * Fake a call to StoreEntry->lock() When rebuilding is done,
             * we'll just call StoreEntry->unlock() on these.
             */
            lock_count++;
            setReleaseFlag();
            LateReleaseStack.push_back(this);
        } else {
            // NOTE(review): this branch looks unreachable — the enclosing
            // condition already requires swap_filen > -1 and nothing above
            // visibly resets it; confirm before relying on it.
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    setMemStatus(NOT_IN_MEMORY);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1303
1304 static void
1305 storeLateRelease(void *unused)
1306 {
1307 StoreEntry *e;
1308 int i;
1309 static int n = 0;
1310
1311 if (StoreController::store_dirs_rebuilding) {
1312 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1313 return;
1314 }
1315
1316 for (i = 0; i < 10; i++) {
1317 e = LateReleaseStack.count ? LateReleaseStack.pop() : NULL;
1318
1319 if (e == NULL) {
1320 /* done! */
1321 debugs(20, 1, "storeLateRelease: released " << n << " objects");
1322 return;
1323 }
1324
1325 e->unlock();
1326 n++;
1327 }
1328
1329 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1330 }
1331
1332 /* return 1 if a store entry is locked */
1333 int
1334 StoreEntry::locked() const
1335 {
1336 if (lock_count)
1337 return 1;
1338
1339 if (swap_status == SWAPOUT_WRITING)
1340 return 1;
1341
1342 if (store_status == STORE_PENDING)
1343 return 1;
1344
1345 /*
1346 * SPECIAL, PUBLIC entries should be "locked"
1347 */
1348 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1349 if (!EBIT_TEST(flags, KEY_PRIVATE))
1350 return 1;
1351
1352 return 0;
1353 }
1354
1355 bool
1356 StoreEntry::validLength() const
1357 {
1358 int64_t diff;
1359 const HttpReply *reply;
1360 assert(mem_obj != NULL);
1361 reply = getReply();
1362 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1363 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1364 objectLen());
1365 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1366 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1367
1368 if (reply->content_length < 0) {
1369 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1370 return 1;
1371 }
1372
1373 if (reply->hdr_sz == 0) {
1374 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1375 return 1;
1376 }
1377
1378 if (mem_obj->method == METHOD_HEAD) {
1379 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1380 return 1;
1381 }
1382
1383 if (reply->sline.status == HTTP_NOT_MODIFIED)
1384 return 1;
1385
1386 if (reply->sline.status == HTTP_NO_CONTENT)
1387 return 1;
1388
1389 diff = reply->hdr_sz + reply->content_length - objectLen();
1390
1391 if (diff == 0)
1392 return 1;
1393
1394 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1395
1396 return 0;
1397 }
1398
1399 static void
1400 storeRegisterWithCacheManager(void)
1401 {
1402 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1403 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1404 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1405 storeCheckCachableStats, 0, 1);
1406 }
1407
void
storeInit(void)
{
    // initialization order matters: keys and the memory replacement policy
    // must exist before the root store initializes and the rebuild begins
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // schedule the incremental late-release sweeper (no-op until rebuild ends)
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1421
1422 void
1423 storeConfigure(void)
1424 {
1425 store_swap_high = (long) (((float) Store::Root().maxSize() *
1426 (float) Config.Swap.highWaterMark) / (float) 100);
1427 store_swap_low = (long) (((float) Store::Root().maxSize() *
1428 (float) Config.Swap.lowWaterMark) / (float) 100);
1429 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1430 }
1431
1432 bool
1433 StoreEntry::memoryCachable() const
1434 {
1435 if (mem_obj == NULL)
1436 return 0;
1437
1438 if (mem_obj->data_hdr.size() == 0)
1439 return 0;
1440
1441 if (mem_obj->inmem_lo != 0)
1442 return 0;
1443
1444 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1445 return 0;
1446
1447 if (UsingSmp()) {
1448 const int64_t expectedSize = mem_obj->expectedReplySize();
1449 // objects of unknown size are not allowed into memory cache, for now
1450 if (expectedSize < 0 ||
1451 expectedSize > static_cast<int64_t>(Config.Store.maxInMemObjSize))
1452 return 0;
1453 }
1454
1455 return 1;
1456 }
1457
1458 int
1459 StoreEntry::checkNegativeHit() const
1460 {
1461 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1462 return 0;
1463
1464 if (expires <= squid_curtime)
1465 return 0;
1466
1467 if (store_status != STORE_OK)
1468 return 0;
1469
1470 return 1;
1471 }
1472
1473 /**
1474 * Set object for negative caching.
1475 * Preserves any expiry information given by the server.
1476 * In absence of proper expiry info it will set to expire immediately,
1477 * or with HTTP-violations enabled the configured negative-TTL is observed
1478 */
1479 void
1480 StoreEntry::negativeCache()
1481 {
1482 // XXX: should make the default for expires 0 instead of -1
1483 // so we can distinguish "Expires: -1" from nothing.
1484 if (expires <= 0)
1485 #if USE_HTTP_VIOLATIONS
1486 expires = squid_curtime + Config.negativeTtl;
1487 #else
1488 expires = squid_curtime;
1489 #endif
1490 EBIT_SET(flags, ENTRY_NEGCACHED);
1491 }
1492
1493 void
1494 storeFreeMemory(void)
1495 {
1496 Store::Root(NULL);
1497 #if USE_CACHE_DIGESTS
1498
1499 if (store_digest)
1500 cacheDigestDestroy(store_digest);
1501
1502 #endif
1503
1504 store_digest = NULL;
1505 }
1506
1507 int
1508 expiresMoreThan(time_t expires, time_t when)
1509 {
1510 if (expires < 0) /* No Expires given */
1511 return 1;
1512
1513 return (expires > (squid_curtime + when));
1514 }
1515
1516 int
1517 StoreEntry::validToSend() const
1518 {
1519 if (EBIT_TEST(flags, RELEASE_REQUEST))
1520 return 0;
1521
1522 if (EBIT_TEST(flags, ENTRY_NEGCACHED))
1523 if (expires <= squid_curtime)
1524 return 0;
1525
1526 if (EBIT_TEST(flags, ENTRY_ABORTED))
1527 return 0;
1528
1529 return 1;
1530 }
1531
/// Derives this entry's timestamp, expires, and lastmod fields from the
/// stored reply headers, compensating for clock skew and network delays.
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // prefer a relative expiry (Expires - Date) anchored to the adjusted
    // served_date; otherwise take the raw Expires value (possibly negative)
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1579
/// Registers the server-side callback invoked if this entry is aborted.
/// Only one abort handler may be registered at a time.
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL); // no handler must be registered yet
    mem_obj->abort.callback = cb;
    // counted cbdata reference; paired with cbdataReferenceDone() in unregisterAbort()
    mem_obj->abort.data = cbdataReference(data);
}
1588
1589 void
1590 StoreEntry::unregisterAbort()
1591 {
1592 assert(mem_obj);
1593 if (mem_obj->abort.callback) {
1594 mem_obj->abort.callback = NULL;
1595 cbdataReferenceDone(mem_obj->abort.data);
1596 }
1597 }
1598
/// Dumps all StoreEntry fields to the debug log at level `l`.
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1620
/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    if (UsingSmp()) {
        assert(new_status != IN_MEMORY); // we do not call this otherwise
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // SPECIAL entries are kept out of the replacement policy so the
        // policy never picks them for eviction
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "StoreEntry::setMemStatus: not inserting special " << mem_obj->url << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "StoreEntry::setMemStatus: inserted mem node " << mem_obj->url << " key: " << getMD5Text());
        }

        hot_obj_count++; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "StoreEntry::setMemStatus: special entry " << mem_obj->url);
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "StoreEntry::setMemStatus: removed mem node " << mem_obj->url);
        }

        hot_obj_count--;
    }

    mem_status = new_status;
}
1665
/// Returns the entry's URL, or a placeholder when unavailable.
const char *
StoreEntry::url() const
{
    // NOTE(review): testing `this == NULL` is undefined behavior in C++;
    // modern compilers may optimize the check away. Callers should avoid
    // invoking methods on null entry pointers.
    if (this == NULL)
        return "[null_entry]";
    else if (mem_obj == NULL)
        return "[null_mem_obj]";
    else
        return mem_obj->url;
}
1676
1677 void
1678 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl)
1679 {
1680 if (mem_obj)
1681 return;
1682
1683 if (hidden_mem_obj) {
1684 debugs(20, 3, HERE << "restoring " << hidden_mem_obj);
1685 mem_obj = hidden_mem_obj;
1686 hidden_mem_obj = NULL;
1687 mem_obj->resetUrls(aUrl, aLogUrl);
1688 return;
1689 }
1690
1691 mem_obj = new MemObject(aUrl, aLogUrl);
1692 }
1693
/* this just sets DELAY_SENDING */
void
StoreEntry::buffer()
{
    // while set, appended data is buffered instead of being sent to clients;
    // cleared (and handlers invoked) by flush()
    EBIT_SET(flags, DELAY_SENDING);
}
1700
1701 /* this just clears DELAY_SENDING and Invokes the handlers */
1702 void
1703 StoreEntry::flush()
1704 {
1705 if (EBIT_TEST(flags, DELAY_SENDING)) {
1706 EBIT_CLR(flags, DELAY_SENDING);
1707 invokeHandlers();
1708 }
1709 }
1710
/// Total stored object size in bytes (headers + body), per MemObject.
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1717
/// Stored body size in bytes: total object length minus reply header size.
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1725
1726 HttpReply const *
1727 StoreEntry::getReply () const
1728 {
1729 if (NULL == mem_obj)
1730 return NULL;
1731
1732 return mem_obj->getReply();
1733 }
1734
1735 void
1736 StoreEntry::reset()
1737 {
1738 assert (mem_obj);
1739 debugs(20, 3, "StoreEntry::reset: " << url());
1740 mem_obj->reset();
1741 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1742 rep->reset();
1743 expires = lastmod = timestamp = -1;
1744 }
1745
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    // currently only the replacement-policy modules need setup here
    storeReplSetup();
}
1758
/*
 * called to add another store removal policy module
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    // the list is terminated by an entry with a NULL typestr sentinel
    for (i = 0; storerepl_list && storerepl_list[i].typestr; i++) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, 1, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    // grow by two: one slot for the new entry, one for the new sentinel
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    // zero the new sentinel entry so the scan above terminates
    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
1784
1785 /*
1786 * Create a removal policy instance
1787 */
1788 RemovalPolicy *
1789 createRemovalPolicy(RemovalPolicySettings * settings)
1790 {
1791 storerepl_entry_t *r;
1792
1793 for (r = storerepl_list; r && r->typestr; r++) {
1794 if (strcmp(r->typestr, settings->type) == 0)
1795 return r->create(settings->args);
1796 }
1797
1798 debugs(20, 1, "ERROR: Unknown policy " << settings->type);
1799 debugs(20, 1, "ERROR: Be sure to have set cache_replacement_policy");
1800 debugs(20, 1, "ERROR: and memory_replacement_policy in squid.conf!");
1801 fatalf("ERROR: Unknown policy %s\n", settings->type);
1802 return NULL; /* NOTREACHED */
1803 }
1804
#if 0
/// Disabled legacy code: moved an entry between swap file numbers by
/// updating the dir map bit and the LRU list. Kept for reference only;
/// it is not compiled.
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1825
1826
1827 /*
1828 * Replace a store entry with
1829 * a new reply. This eats the reply.
1830 */
1831 void
1832 StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
1833 {
1834 debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());
1835
1836 if (!mem_obj) {
1837 debugs(20, 0, "Attempt to replace object with no in-memory representation");
1838 return;
1839 }
1840
1841 mem_obj->replaceHttpReply(rep);
1842
1843 if (andStartWriting)
1844 startWriting();
1845 }
1846
1847
/// Packs the reply headers (and any reply body object) into this
/// still-empty entry, beginning the process of storing the response.
void
StoreEntry::startWriting()
{
    Packer p;

    /* TODO: when we store headers serparately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */
    packerToStoreInit(&p, this);

    assert (isEmpty()); // headers must be the first thing written
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    rep->packHeadersInto(&p);
    // record where headers end so body offsets can be computed later
    mem_obj->markEndOfReplyHeaders();

    httpBodyPackInto(&rep->body, &p);

    packerClean(&p); // releases packer resources; must follow all packing
}
1871
1872
/// Builds and serializes the swap metadata TLV list for this entry,
/// recording the serialized header size in mem_obj->swap_hdr_sz.
/// Returns the packed buffer (presumably heap-allocated by
/// storeSwapMetaPack; caller owns it — TODO confirm ownership contract).
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list); // the TLV list is no longer needed once packed
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1884
/// Decides whether this entry may be swapped out to disk, caching the
/// verdict in mem_obj->swapout.decision so repeated calls are cheap.
/// The decision may stay undecided (returning false) while the reply
/// size is still unknown.
bool
StoreEntry::swapoutPossible()
{
    // no cache_dirs configured means there is nowhere to swap to
    if (!Config.cacheSwap.n_configured)
        return false;

    /* should we swap something out to disk? */
    debugs(20, 7, "storeSwapOut: " << url());
    debugs(20, 7, "storeSwapOut: store_status = " << storeStatusStr[store_status]);

    assert(mem_obj);
    MemObject::SwapOut::Decision &decision = mem_obj->swapout.decision;

    // if we decided that swapout is not possible, do not repeat same checks
    if (decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 3, "storeSwapOut: already rejected");
        return false;
    }

    // this flag may change so we must check it even if we already said "yes"
    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        assert(EBIT_TEST(flags, RELEASE_REQUEST));
        // StoreEntry::abort() already closed the swap out file, if any
        decision = MemObject::SwapOut::swImpossible;
        return false;
    }

    // if we decided that swapout is possible, do not repeat same checks
    if (decision == MemObject::SwapOut::swPossible) {
        debugs(20, 3, "storeSwapOut: already allowed");
        return true;
    }

    // if we are swapping out already, do not repeat same checks
    if (swap_status != SWAPOUT_NONE) {
        debugs(20, 3, "storeSwapOut: already started");
        decision = MemObject::SwapOut::swPossible;
        return true;
    }

    if (!checkCachable()) {
        debugs(20, 3, "storeSwapOut: not cachable");
        decision = MemObject::SwapOut::swImpossible;
        return false;
    }

    // SPECIAL entries are never written to disk
    if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
        debugs(20, 3, "storeSwapOut: " << url() << " SPECIAL");
        decision = MemObject::SwapOut::swImpossible;
        return false;
    }

    // check cache_dir max-size limit if all cache_dirs have it
    if (store_maxobjsize >= 0) {
        // TODO: add estimated store metadata size to be conservative

        // use guaranteed maximum if it is known
        const int64_t expectedEnd = mem_obj->expectedReplySize();
        debugs(20, 7, "storeSwapOut: expectedEnd = " << expectedEnd);
        if (expectedEnd > store_maxobjsize) {
            debugs(20, 3, "storeSwapOut: will not fit: " << expectedEnd <<
                   " > " << store_maxobjsize);
            decision = MemObject::SwapOut::swImpossible;
            return false; // known to outgrow the limit eventually
        }

        // use current minimum (always known)
        const int64_t currentEnd = mem_obj->endOffset();
        if (currentEnd > store_maxobjsize) {
            debugs(20, 3, "storeSwapOut: does not fit: " << currentEnd <<
                   " > " << store_maxobjsize);
            decision = MemObject::SwapOut::swImpossible;
            return false; // already does not fit and may only get bigger
        }

        // prevent default swPossible answer for yet unknown length
        if (expectedEnd < 0) {
            debugs(20, 3, "storeSwapOut: wait for more info: " <<
                   store_maxobjsize);
            return false; // may fit later, but will be rejected now
        }
    }

    decision = MemObject::SwapOut::swPossible;
    return true;
}
1971
/// Releases in-memory object data that is no longer needed, using the
/// swappable or unswappable trimming strategy as appropriate.
void
StoreEntry::trimMemory()
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (!swapOutAble()) {
        if (mem_obj->policyLowestOffsetToKeep(0) == 0) {
            /* Nothing to do */
            return;
        }
        /*
         * Its not swap-able, and we're about to delete a chunk,
         * so we must make it PRIVATE.  This is tricky/ugly because
         * for the most part, we treat swapable == cachable here.
         */
        releaseRequest();
        mem_obj->trimUnSwappable ();
    } else {
        mem_obj->trimSwappable ();
    }
}
1999
2000 bool
2001 StoreEntry::modifiedSince(HttpRequest * request) const
2002 {
2003 int object_length;
2004 time_t mod_time = lastmod;
2005
2006 if (mod_time < 0)
2007 mod_time = timestamp;
2008
2009 debugs(88, 3, "modifiedSince: '" << url() << "'");
2010
2011 debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
2012
2013 if (mod_time < 0)
2014 return true;
2015
2016 /* Find size of the object */
2017 object_length = getReply()->content_length;
2018
2019 if (object_length < 0)
2020 object_length = contentLen();
2021
2022 if (mod_time > request->ims) {
2023 debugs(88, 3, "--> YES: entry newer than client");
2024 return true;
2025 } else if (mod_time < request->ims) {
2026 debugs(88, 3, "--> NO: entry older than client");
2027 return false;
2028 } else if (request->imslen < 0) {
2029 debugs(88, 3, "--> NO: same LMT, no client length");
2030 return false;
2031 } else if (request->imslen == object_length) {
2032 debugs(88, 3, "--> NO: same LMT, same length");
2033 return false;
2034 } else {
2035 debugs(88, 3, "--> YES: same LMT, different length");
2036 return true;
2037 }
2038 }
2039
2040 bool
2041 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
2042 {
2043 const String reqETags = request.header.getList(HDR_IF_MATCH);
2044 return hasOneOfEtags(reqETags, false);
2045 }
2046
2047 bool
2048 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
2049 {
2050 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
2051 // weak comparison is allowed only for HEAD or full-body GET requests
2052 const bool allowWeakMatch = !request.flags.range &&
2053 (request.method == METHOD_GET || request.method == METHOD_HEAD);
2054 return hasOneOfEtags(reqETags, allowWeakMatch);
2055 }
2056
/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(HDR_ETAG);
    // without a reply ETag, only the wildcard "*" can match
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // walk the comma-separated ETag list until a match is found
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            // copy the unterminated list item so it can be parsed as an ETag
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
2084
/// The SwapDir this entry resides in; requires a valid swap_dirn.
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
2091
/// Removes this entry's on-disk copy and clears its swap coordinates.
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    // reset swap state only after the dir no longer needs the coordinates
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
2100
2101 /*
2102 * return true if the entry is in a state where
2103 * it can accept more data (ie with write() method)
2104 */
2105 bool
2106 StoreEntry::isAccepting() const
2107 {
2108 if (STORE_PENDING != store_status)
2109 return false;
2110
2111 if (EBIT_TEST(flags, ENTRY_ABORTED))
2112 return false;
2113
2114 return true;
2115 }
2116
/// Compact one-line debugging representation: filen@dirn=mem/ping/store/swap.
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    return os << e.swap_filen << '@' << e.swap_dirn << '=' <<
           e.mem_status << '/' << e.ping_status << '/' << e.store_status << '/' <<
           e.swap_status;
}
2123
2124 /* NullStoreEntry */
2125
2126 NullStoreEntry NullStoreEntry::_instance;
2127
2128 NullStoreEntry *
2129 NullStoreEntry::getInstance()
2130 {
2131 return &_instance;
2132 }
2133
2134 char const *
2135 NullStoreEntry::getMD5Text() const
2136 {
2137 return "N/A";
2138 }
2139
/// The singleton must never be deleted; treat any attempt as fatal.
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2145
2146 char const *
2147 NullStoreEntry::getSerialisedMetaData()
2148 {
2149 return NULL;
2150 }
2151
2152 #if !_USE_INLINE_
2153 #include "Store.cci"
2154 #endif