]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Merge from trunk rev.14108
[thirdparty/squid.git] / src / store.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "CacheDigest.h"
13 #include "CacheManager.h"
14 #include "comm/Connection.h"
15 #include "comm/Read.h"
16 #include "ETag.h"
17 #include "event.h"
18 #include "fde.h"
19 #include "globals.h"
20 #include "http.h"
21 #include "HttpReply.h"
22 #include "HttpRequest.h"
23 #include "mem_node.h"
24 #include "MemObject.h"
25 #include "mgr/Registration.h"
26 #include "mgr/StoreIoAction.h"
27 #include "profiler/Profiler.h"
28 #include "repl_modules.h"
29 #include "RequestFlags.h"
30 #include "SquidConfig.h"
31 #include "SquidTime.h"
32 #include "StatCounters.h"
33 #include "stmem.h"
34 #include "Store.h"
35 #include "store_digest.h"
36 #include "store_key_md5.h"
37 #include "store_key_md5.h"
38 #include "store_log.h"
39 #include "store_rebuild.h"
40 #include "StoreClient.h"
41 #include "StoreIOState.h"
42 #include "StoreMeta.h"
43 #include "StrList.h"
44 #include "swap_log_op.h"
45 #include "SwapDir.h"
46 #include "tools.h"
47 #if USE_DELAY_POOLS
48 #include "DelayPools.h"
49 #endif
50
51 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
52 * XXX: convert to MEMPROXY_CLASS() API
53 */
54 #include "mem/Pool.h"
55
56 #include <climits>
57 #include <stack>
58
59 #define REBUILD_TIMESTAMP_DELTA_MAX 2
60
61 #define STORE_IN_MEM_BUCKETS (229)
62
63 /** \todo Convert these string constants to enum string-arrays generated */
64
65 const char *memStatusStr[] = {
66 "NOT_IN_MEMORY",
67 "IN_MEMORY"
68 };
69
70 const char *pingStatusStr[] = {
71 "PING_NONE",
72 "PING_WAITING",
73 "PING_DONE"
74 };
75
76 const char *storeStatusStr[] = {
77 "STORE_OK",
78 "STORE_PENDING"
79 };
80
81 const char *swapStatusStr[] = {
82 "SWAPOUT_NONE",
83 "SWAPOUT_WRITING",
84 "SWAPOUT_DONE"
85 };
86
/*
 * Registry entry for a removal (replacement) policy type: maps the
 * policy's configuration name to its factory function.
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;        ///< policy name as used in squid.conf
    REMOVALPOLICYCREATE *create;        ///< factory for this policy type
};

/// table of registered removal policies (filled by storeReplAdd)
static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
MemAllocator *StoreEntry::pool = NULL;

StorePointer Store::CurrentRoot = NULL;
114
/// replaces the global root storage pointer
void
Store::Root(Store * aRoot)
{
    CurrentRoot = aRoot;
}
120
/// smart-pointer convenience overload; forwards to the raw-pointer setter
void
Store::Root(StorePointer aRoot)
{
    Root(aRoot.getRaw());
}
126
/// appends root-store statistics to the given (non-nil) output entry
void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}
133
/// no-op in the base class; concrete stores create their on-disk structures here
void
Store::create()
{}
137
/// no-op in the base class; concrete stores react to a full disk here
void
Store::diskFull()
{}
141
/// no-op in the base class; concrete stores flush pending state here
void
Store::sync()
{}
145
/// base class cannot unlink entries; reaching this is a fatal logic error
void
Store::unlink(StoreEntry &)
{
    fatal("Store::unlink on invalid Store\n");
}
151
/// Allocates a StoreEntry from a dedicated memory pool, creating the pool
/// (with a 2MB chunk size) on first use. Only whole StoreEntry objects may
/// be allocated this way.
void *
StoreEntry::operator new (size_t bytecount)
{
    assert(bytecount == sizeof (StoreEntry));

    if (!pool) {
        // lazy pool creation; chunk size tuned for many small entries
        pool = memPoolCreate ("StoreEntry", bytecount);
        pool->setChunkSize(2048 * 1024);
    }

    return pool->alloc();
}
164
/// returns the entry's memory to the pool created by operator new
void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
170
/// Makes the entry shareable by giving it a public key, unless a release
/// has already been requested (released entries must stay private).
void
StoreEntry::makePublic()
{
    /* This object can be cached for a long time */

    if (!EBIT_TEST(flags, RELEASE_REQUEST))
        setPublicKey();
}
179
/// Marks the entry as uncachable: expires it immediately and requests its
/// release once no longer in use.
void
StoreEntry::makePrivate()
{
    /* This object should never be cached at all */
    expireNow();
    releaseRequest();           /* delete object when not used */
}
187
/// Caches an error/negative response: flags it negatively cached and then
/// publishes it so future requests can hit it (for negative_ttl).
void
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    negativeCache();
    makePublic();
}
195
196 size_t
197 StoreEntry::inUseCount()
198 {
199 if (!pool)
200 return 0;
201 return pool->getInUseCount();
202 }
203
/// textual (hex) representation of this entry's MD5 cache key, for logging
const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}
209
210 #include "comm.h"
211
/// DeferredRead callback: re-issues a previously deferred read through
/// delayAwareRead() on the StoreEntry passed as theContext.
void
StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
{
    StoreEntry *anEntry = (StoreEntry *)theContext;
    anEntry->delayAwareRead(aRead.conn,
                            aRead.buf,
                            aRead.len,
                            aRead.callback);
}
221
/// Issues a comm read for this entry, honoring read-ahead and (when built
/// with delay pools) delay-pool limits. If no bytes may be read now, the
/// read is queued as a DeferredRead and retried later via DeferReader().
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            // blocked by the read-ahead gap; park the read on the MemObject
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        // read-ahead would allow it, so a delay pool must be the limiter
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
262
/// How many bytes within aRange this entry is willing to accept now.
/// Without a MemObject there is no read-ahead bookkeeping, so the full
/// range is allowed; otherwise the read-ahead policy and (unless
/// ignoreDelayPools) delay pools cap the answer.
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
280
/// whether reading should be deferred because the entry accepts no bytes now
bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}
286
287 void
288 StoreEntry::setNoDelay(bool const newValue)
289 {
290 if (mem_obj)
291 mem_obj->setNoDelay(newValue);
292 }
293
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decides whether a new store client should be served from memory or via
/// the swapin (disk) path. The checks below are order-dependent.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // the beginning of the object is no longer in memory
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
359
/// Constructs an empty, unlocked, not-yet-stored entry: no MemObject, no
/// swap file (-1 indices), pending store status, and sentinel timestamps.
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}
379
/// trivial destructor; teardown work happens in destroyStoreEntry()
StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
384
#if USE_ADAPTATION
/// Remembers a producer call to resume later via kickProducer().
/// Only the first deferred producer is kept; later requests are logged
/// and ignored.
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        // fixed typo: "allready" -> "already"
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// schedules the deferred producer call (if any) and forgets it
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif
405
/// Frees this entry's in-memory object after disconnecting it from the
/// transients table and the shared memory cache (both skipped during
/// shutdown because Store::Root() is FATALly missing then).
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        // clear the member before delete so observers do not see a stale mem_obj
        mem_obj = NULL;
        delete mem;
    }
}
423
/// hash-table free() callback: fully tears down a StoreEntry — disconnects
/// any swap file, destroys the MemObject, removes the hash link, and
/// deletes the entry. The NullStoreEntry singleton is never destroyed.
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
        sd.disconnect(*e);
    }

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have freed and cleared the key
    assert(e->key == NULL);

    delete e;
}
448
449 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
450
/// Stores a private copy of someKey as this entry's key and links the
/// entry into the global store hash table.
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}
458
/// Unlinks the entry from the store hash table and frees its key.
/// Safe to call on entries that were never inserted.
void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}
468
469 /* -------------------------------------------------------------------------- */
470
/* get rid of memory copy of the object */
/// Drops the in-memory copy of the object; if the object has not finished
/// swapping out to disk, the whole entry is released since no complete
/// copy remains anywhere.
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    if (swap_status != SWAPOUT_DONE)
        release();
}
485
/// Acquires one reference-count lock on the entry; context identifies the
/// caller for debugging. Must be balanced by unlock().
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}
492
/// records a reference: updates last-reference time and tells the root
/// store (e.g., for replacement-policy bookkeeping)
void
StoreEntry::touch()
{
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
499
/// Sets RELEASE_REQUEST (idempotent) and marks the entry for unlinking in
/// all cache indexes. Unlike releaseRequest(), does not touch the key.
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}
512
/// Requests eventual release of the entry (idempotent): flags it for
/// unlinking and switches it to a private key so no new hits occur.
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}
523
/// Drops one lock; when the last lock goes away, either releases the entry
/// (if a release was requested or it never completed) or hands it to the
/// root store as idle. May delete the entry. Returns the remaining count.
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // a still-pending entry with no users cannot be completed; drop it
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
551
552 void
553 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
554 {
555 assert (aClient);
556 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
557
558 if (!result)
559 aClient->created (NullStoreEntry::getInstance());
560 else
561 aClient->created (result);
562 }
563
/// Looks up the public entry for the request and notifies aClient; a miss
/// is reported as the NullStoreEntry singleton.
void
StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
{
    assert (aClient);
    StoreEntry *result = storeGetPublicByRequest (request);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
575
/// Looks up the public entry for (uri, method) and notifies aClient; a
/// miss is reported as the NullStoreEntry singleton.
void
StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
{
    assert (aClient);
    StoreEntry *result = storeGetPublic (uri, method);

    if (!result)
        result = NullStoreEntry::getInstance();

    aClient->created (result);
}
587
/// finds the cached entry for the public key derived from (uri, method),
/// or nil on a miss
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublic(uri, method));
}
593
/// finds the cached entry for the public key derived from (request, method),
/// or nil on a miss
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
{
    return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
}
599
/// Finds the cached entry matching the request's own method; for a HEAD
/// miss, falls back to a cached GET entry (a HEAD reply can be generated
/// from it). Returns nil on a complete miss.
StoreEntry *
storeGetPublicByRequest(HttpRequest * req)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);

    return e;
}
611
/// Returns the next positive id for private cache keys, starting at 1.
/// Restarts at 1 after the counter wraps past INT_MAX.
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;
    if (key_counter < 0) // wrapped around; never hand out non-positive ids
        key_counter = 1;

    return key_counter;
}
622
/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
/// Rekeys the entry under a unique private key so it can no longer be hit
/// by other requests. If the entry currently has a (public) key, it is
/// also marked for release and logged as deleted from the swap log.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    // derive the private key from our URIs when known; otherwise use a
    // "JUNK" placeholder plus a unique counter value
    if (mem_obj && mem_obj->hasUris()) {
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
661
/// Rekeys the entry under its public key so other requests can hit it.
/// Handles Vary bookkeeping: updates the request's variance mark, replaces
/// a stale base object if the variance changed, and creates an internal
/// "x-squid-internal/vary" marker object when none exists yet. Any
/// existing entry under the new public key is privatized and released.
void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return;                 /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers);       /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(HDR_VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    // evict any old entry occupying our public key slot; the release may
    // invalidate newkey's buffer, so the key is recomputed afterwards
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
780
781 StoreEntry *
782 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
783 {
784 StoreEntry *e = NULL;
785 debugs(20, 3, "storeCreateEntry: '" << url << "'");
786
787 e = new StoreEntry();
788 e->makeMemObject();
789 e->mem_obj->setUris(url, log_url, method);
790
791 if (flags.cachable) {
792 EBIT_CLR(e->flags, RELEASE_REQUEST);
793 } else {
794 e->releaseRequest();
795 }
796
797 e->store_status = STORE_PENDING;
798 e->refcount = 0;
799 e->lastref = squid_curtime;
800 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
801 e->ping_status = PING_NONE;
802 EBIT_SET(e->flags, ENTRY_VALIDATED);
803 return e;
804 }
805
/// Creates a new locked entry ready for use: builds the pure entry, locks
/// it, and assigns a private or public key depending on whether private
/// keys are in use for this (possibly hierarchical) request.
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
    e->lock("storeCreateEntry");

    if (neighbors_do_private_keys || !flags.hierarchical)
        e->setPrivateKey();
    else
        e->setPublicKey();

    return e;
}
819
/* Mark object as expired */
/// forces immediate expiry by setting the expiry time to now
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
827
/// Appends writeBuffer to the entry's MemObject, shifting the caller's
/// content offset by the stored header size, and wakes waiting clients
/// unless DELAY_SENDING is set. Only valid while the entry is pending.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    // make room in the memory cache before storing the new data
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
848
/* Append incoming data from a primary server to an entry. */
/// Wraps (buf, len) in a StoreIOBuffer positioned at the current content
/// end (excluding stored headers) and forwards it to write().
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
867
868 void
869 StoreEntry::vappendf(const char *fmt, va_list vargs)
870 {
871 LOCAL_ARRAY(char, buf, 4096);
872 *buf = 0;
873 int x;
874
875 #ifdef VA_COPY
876 va_args ap;
877 /* Fix of bug 753r. The value of vargs is undefined
878 * after vsnprintf() returns. Make a copy of vargs
879 * incase we loop around and call vsnprintf() again.
880 */
881 VA_COPY(ap,vargs);
882 errno = 0;
883 if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
884 fatal(xstrerr(errno));
885 return;
886 }
887 va_end(ap);
888 #else /* VA_COPY */
889 errno = 0;
890 if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
891 fatal(xstrerr(errno));
892 return;
893 }
894 #endif /*VA_COPY*/
895
896 if (x < static_cast<int>(sizeof(buf))) {
897 append(buf, x);
898 return;
899 }
900
901 // okay, do it the slow way.
902 char *buf2 = new char[x+1];
903 int y = vsnprintf(buf2, x+1, fmt, vargs);
904 assert(y >= 0 && y == x);
905 append(buf2, y);
906 delete[] buf2;
907 }
908
// deprecated. use StoreEntry::appendf() instead.
/// varargs wrapper over StoreEntry::vappendf()
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}
918
// deprecated. use StoreEntry::appendf() instead.
/// thin forwarding wrapper over StoreEntry::vappendf()
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
925
/// Counters reported by storeCheckCachableStats(): one "no" counter per
/// reason StoreEntry::checkCachable() rejects an entry, plus a "yes"
/// counter for entries accepted by default.
struct _store_check_cachable_hist {

    struct {
        int non_get;
        int not_entry_cachable;
        int wrong_content_length;
        int negative_cached;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;
945
946 int
947 storeTooManyDiskFilesOpen(void)
948 {
949 if (Config.max_open_disk_fds == 0)
950 return 0;
951
952 if (store_open_disk_fd > Config.max_open_disk_fds)
953 return 1;
954
955 return 0;
956 }
957
/// Whether the object is below the configured minimum object size.
/// Special entries are exempt. A completed object is judged by its actual
/// size; otherwise the advertised Content-Length (when known) is used.
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
973
974 bool
975 StoreEntry::checkTooBig() const
976 {
977 if (mem_obj->endOffset() > store_maxobjsize)
978 return true;
979
980 if (getReply()->content_length < 0)
981 return false;
982
983 return (getReply()->content_length > store_maxobjsize);
984 }
985
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decides whether this entry may be cached, updating the
/// store_check_cachable_hist counters. Rejected entries (except those
/// already released or negatively cached) get a releaseRequest(). The
/// checks are ordered: cheap/definitive rejections first.
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0;           /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}
1052
/// cache manager report: dumps the checkCachable() accept/reject counters
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1085
/// Marks the entry as fully received: records the final object size,
/// switches to STORE_OK, flags/releases entries with a bad length, and
/// wakes waiting clients. A non-pending caller must be an aborted entry
/// with no clients.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!validLength()) {
        EBIT_SET(flags, ENTRY_BAD_LENGTH);
        releaseRequest();
    }

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1128
/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
/// Aborts a pending entry: negatively caches it, requests its release,
/// notifies the server side (via the registered abort callback) and the
/// client side, and closes any swapout file. Holds a lock for the
/// duration so the entry survives the notifications.
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort");       /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort");       /* unlock */
}
1181
1182 /**
1183 * Clear Memory storage to accommodate the given object len
1184 */
1185 void
1186 storeGetMemSpace(int size)
1187 {
1188 PROF_start(storeGetMemSpace);
1189 StoreEntry *e = NULL;
1190 int released = 0;
1191 static time_t last_check = 0;
1192 size_t pages_needed;
1193 RemovalPurgeWalker *walker;
1194
1195 if (squid_curtime == last_check) {
1196 PROF_stop(storeGetMemSpace);
1197 return;
1198 }
1199
1200 last_check = squid_curtime;
1201
1202 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1203
1204 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1205 PROF_stop(storeGetMemSpace);
1206 return;
1207 }
1208
1209 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1210 " pages");
1211
1212 /* XXX what to set as max_scan here? */
1213 walker = mem_policy->PurgeInit(mem_policy, 100000);
1214
1215 while ((e = walker->Next(walker))) {
1216 e->purgeMem();
1217 ++released;
1218
1219 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1220 break;
1221 }
1222
1223 walker->Done(walker);
1224 debugs(20, 3, "storeGetMemSpace stats:");
1225 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1226 debugs(20, 3, " " << std::setw(6) << released << " were released");
1227 PROF_stop(storeGetMemSpace);
1228 }
1229
1230 /* thunk through to Store::Root().maintain(). Note that this would be better still
1231 * if registered against the root store itself, but that requires more complex
1232 * update logic - bigger fish to fry first. Long term each store when
1233 * it becomes active will self register
1234 */
1235 void
1236 Store::Maintain(void *)
1237 {
1238 Store::Root().maintain();
1239
1240 /* Reregister a maintain event .. */
1241 eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);
1242
1243 }
1244
1245 /* The maximum objects to scan for maintain storage space */
1246 #define MAINTAIN_MAX_SCAN 1024
1247 #define MAINTAIN_MAX_REMOVE 64
1248
1249 /*
1250 * This routine is to be called by main loop in main.c.
1251 * It removes expired objects on only one bucket for each time called.
1252 *
1253 * This should get called 1/s from main().
1254 */
1255 void
1256 StoreController::maintain()
1257 {
1258 static time_t last_warn_time = 0;
1259
1260 PROF_start(storeMaintainSwapSpace);
1261 swapDir->maintain();
1262
1263 /* this should be emitted by the oversize dir, not globally */
1264
1265 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1266 if (squid_curtime - last_warn_time > 10) {
1267 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1268 << Store::Root().currentSize() / 1024.0 << " KB > "
1269 << (Store::Root().maxSize() >> 10) << " KB");
1270 last_warn_time = squid_curtime;
1271 }
1272 }
1273
1274 PROF_stop(storeMaintainSwapSpace);
1275 }
1276
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // drop any in-memory copy before dealing with the on-disk one
    Store::Root().memoryUnlink(*this);

    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        // disk-resident entries cannot be destroyed while cache_dirs are
        // still being rebuilt; defer them to storeLateRelease()
        setPrivateKey();

        if (swap_filen > -1) {
            // lock the entry until rebuilding is done
            lock("storeLateRelease");
            setReleaseFlag();
            LateReleaseStack.push(this);
        } else {
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1326
1327 static void
1328 storeLateRelease(void *)
1329 {
1330 StoreEntry *e;
1331 static int n = 0;
1332
1333 if (StoreController::store_dirs_rebuilding) {
1334 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1335 return;
1336 }
1337
1338 // TODO: this works but looks unelegant.
1339 for (int i = 0; i < 10; ++i) {
1340 if (LateReleaseStack.empty()) {
1341 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1342 return;
1343 } else {
1344 e = LateReleaseStack.top();
1345 LateReleaseStack.pop();
1346 }
1347
1348 e->unlock("storeLateRelease");
1349 ++n;
1350 }
1351
1352 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1353 }
1354
1355 /* return 1 if a store entry is locked */
1356 int
1357 StoreEntry::locked() const
1358 {
1359 if (lock_count)
1360 return 1;
1361
1362 /*
1363 * SPECIAL, PUBLIC entries should be "locked";
1364 * XXX: Their owner should lock them then instead of relying on this hack.
1365 */
1366 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1367 if (!EBIT_TEST(flags, KEY_PRIVATE))
1368 return 1;
1369
1370 return 0;
1371 }
1372
1373 bool
1374 StoreEntry::validLength() const
1375 {
1376 int64_t diff;
1377 const HttpReply *reply;
1378 assert(mem_obj != NULL);
1379 reply = getReply();
1380 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1381 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1382 objectLen());
1383 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1384 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1385
1386 if (reply->content_length < 0) {
1387 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1388 return 1;
1389 }
1390
1391 if (reply->hdr_sz == 0) {
1392 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1393 return 1;
1394 }
1395
1396 if (mem_obj->method == Http::METHOD_HEAD) {
1397 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1398 return 1;
1399 }
1400
1401 if (reply->sline.status() == Http::scNotModified)
1402 return 1;
1403
1404 if (reply->sline.status() == Http::scNoContent)
1405 return 1;
1406
1407 diff = reply->hdr_sz + reply->content_length - objectLen();
1408
1409 if (diff == 0)
1410 return 1;
1411
1412 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1413
1414 return 0;
1415 }
1416
1417 static void
1418 storeRegisterWithCacheManager(void)
1419 {
1420 Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
1421 Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
1422 Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
1423 storeCheckCachableStats, 0, 1);
1424 }
1425
/// one-time startup initialization of the storage subsystem:
/// keys, memory replacement policy, digests, store log, the deferred
/// release event, the store root, and the cache index rebuild
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1439
1440 /// computes maximum size of a cachable object
1441 /// larger objects are rejected by all (disk and memory) cache stores
1442 static int64_t
1443 storeCalcMaxObjSize()
1444 {
1445 int64_t ms = 0; // nothing can be cached without at least one store consent
1446
1447 // global maximum is at least the disk store maximum
1448 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1449 assert (Config.cacheSwap.swapDirs[i].getRaw());
1450 const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
1451 if (ms < storeMax)
1452 ms = storeMax;
1453 }
1454
1455 // global maximum is at least the memory store maximum
1456 // TODO: move this into a memory cache class when we have one
1457 const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
1458 if (ms < memMax)
1459 ms = memMax;
1460
1461 return ms;
1462 }
1463
1464 void
1465 storeConfigure(void)
1466 {
1467 store_swap_high = (long) (((float) Store::Root().maxSize() *
1468 (float) Config.Swap.highWaterMark) / (float) 100);
1469 store_swap_low = (long) (((float) Store::Root().maxSize() *
1470 (float) Config.Swap.lowWaterMark) / (float) 100);
1471 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1472
1473 store_maxobjsize = storeCalcMaxObjSize();
1474 }
1475
1476 bool
1477 StoreEntry::memoryCachable()
1478 {
1479 if (!checkCachable())
1480 return 0;
1481
1482 if (mem_obj == NULL)
1483 return 0;
1484
1485 if (mem_obj->data_hdr.size() == 0)
1486 return 0;
1487
1488 if (mem_obj->inmem_lo != 0)
1489 return 0;
1490
1491 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1492 return 0;
1493
1494 return 1;
1495 }
1496
1497 int
1498 StoreEntry::checkNegativeHit() const
1499 {
1500 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1501 return 0;
1502
1503 if (expires <= squid_curtime)
1504 return 0;
1505
1506 if (store_status != STORE_OK)
1507 return 0;
1508
1509 return 1;
1510 }
1511
1512 /**
1513 * Set object for negative caching.
1514 * Preserves any expiry information given by the server.
1515 * In absence of proper expiry info it will set to expire immediately,
1516 * or with HTTP-violations enabled the configured negative-TTL is observed
1517 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    // Note: exactly one of the two assignments below is compiled in.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1531
1532 void
1533 storeFreeMemory(void)
1534 {
1535 Store::Root(NULL);
1536 #if USE_CACHE_DIGESTS
1537
1538 if (store_digest)
1539 cacheDigestDestroy(store_digest);
1540
1541 #endif
1542
1543 store_digest = NULL;
1544 }
1545
1546 int
1547 expiresMoreThan(time_t expires, time_t when)
1548 {
1549 if (expires < 0) /* No Expires given */
1550 return 1;
1551
1552 return (expires > (squid_curtime + when));
1553 }
1554
/// whether the entry can still be used as a response source: not released,
/// not a stale negative-cache entry, not aborted, and backed by a disk
/// cache, an ongoing swapout, or an intact in-memory copy
int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
1594
/// sets timestamp, lastmod and expires from the current reply,
/// correcting for clock skew and intermediary age (RFC 2616 13.2.3)
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(HDR_AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // with both Date and Expires present, keep their relative distance
    // from the (possibly adjusted) served date; otherwise use Expires as-is
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1642
1643 void
1644 StoreEntry::registerAbort(STABH * cb, void *data)
1645 {
1646 assert(mem_obj);
1647 assert(mem_obj->abort.callback == NULL);
1648 mem_obj->abort.callback = cb;
1649 mem_obj->abort.data = cbdataReference(data);
1650 }
1651
1652 void
1653 StoreEntry::unregisterAbort()
1654 {
1655 assert(mem_obj);
1656 if (mem_obj->abort.callback) {
1657 mem_obj->abort.callback = NULL;
1658 cbdataReferenceDone(mem_obj->abort.data);
1659 }
1660 }
1661
/// dumps every StoreEntry field to the debug log at level l
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1683
/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // ENTRY_SPECIAL entries are never entered into the replacement policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1728
1729 const char *
1730 StoreEntry::url() const
1731 {
1732 if (mem_obj == NULL)
1733 return "[null_mem_obj]";
1734 else
1735 return mem_obj->storeId();
1736 }
1737
1738 MemObject *
1739 StoreEntry::makeMemObject()
1740 {
1741 if (!mem_obj)
1742 mem_obj = new MemObject();
1743 return mem_obj;
1744 }
1745
1746 void
1747 StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
1748 {
1749 makeMemObject();
1750 mem_obj->setUris(aUrl, aLogUrl, aMethod);
1751 }
1752
/* this just sets DELAY_SENDING */
void
StoreEntry::buffer()
{
    // postpone handler notifications until flush() clears this flag
    EBIT_SET(flags, DELAY_SENDING);
}
1759
1760 /* this just clears DELAY_SENDING and Invokes the handlers */
1761 void
1762 StoreEntry::flush()
1763 {
1764 if (EBIT_TEST(flags, DELAY_SENDING)) {
1765 EBIT_CLR(flags, DELAY_SENDING);
1766 invokeHandlers();
1767 }
1768 }
1769
/// the stored object size (mem_obj->object_sz), including reply headers
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1776
/// the stored body size: objectLen() minus the reply header size
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1784
1785 HttpReply const *
1786 StoreEntry::getReply () const
1787 {
1788 if (NULL == mem_obj)
1789 return NULL;
1790
1791 return mem_obj->getReply();
1792 }
1793
/// discards buffered data and the current reply, and clears timestamps
void
StoreEntry::reset()
{
    assert (mem_obj);
    debugs(20, 3, "StoreEntry::reset: " << url());
    mem_obj->reset();
    HttpReply *rep = (HttpReply *) getReply(); // bypass const
    rep->reset();
    expires = lastmod = timestamp = -1;
}
1804
1805 /*
1806 * storeFsInit
1807 *
1808 * This routine calls the SETUP routine for each fs type.
1809 * I don't know where the best place for this is, and I'm not going to shuffle
1810 * around large chunks of code right now (that can be done once its working.)
1811 */
void
storeFsInit(void)
{
    // run the replacement-module setup hooks (see comment above)
    storeReplSetup();
}
1817
1818 /*
1819 * called to add another store removal policy module
1820 */
1821 void
1822 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1823 {
1824 int i;
1825
1826 /* find the number of currently known repl types */
1827 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1828 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1829 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1830 return;
1831 }
1832 }
1833
1834 /* add the new type */
1835 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1836
1837 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1838
1839 storerepl_list[i].typestr = type;
1840
1841 storerepl_list[i].create = create;
1842 }
1843
1844 /*
1845 * Create a removal policy instance
1846 */
1847 RemovalPolicy *
1848 createRemovalPolicy(RemovalPolicySettings * settings)
1849 {
1850 storerepl_entry_t *r;
1851
1852 for (r = storerepl_list; r && r->typestr; ++r) {
1853 if (strcmp(r->typestr, settings->type) == 0)
1854 return r->create(settings->args);
1855 }
1856
1857 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1858 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1859 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1860 fatalf("ERROR: Unknown policy %s\n", settings->type);
1861 return NULL; /* NOTREACHED */
1862 }
1863
#if 0
/* Disabled (compiled out): legacy swap file number bookkeeping,
 * kept for reference only. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1884
/// stores the given error reply as the complete object, negatively
/// caches it, and marks the entry for release
void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer(); // delay notifications until the whole reply is stored
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest();
    unlock("StoreEntry::storeErrorResponse");
}
1897
1898 /*
1899 * Replace a store entry with
1900 * a new reply. This eats the reply.
1901 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep);

    // callers may defer header serialization (see startWriting())
    if (andStartWriting)
        startWriting();
}
1917
/// serializes the current reply headers (and any reply body prefix)
/// into this still-empty entry
void
StoreEntry::startWriting()
{
    /* TODO: when we store headers serparately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    buffer(); // sets DELAY_SENDING; presumably cleared by a later flush()
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
}
1938
1939 char const *
1940 StoreEntry::getSerialisedMetaData()
1941 {
1942 StoreMeta *tlv_list = storeSwapMetaBuild(this);
1943 int swap_hdr_sz;
1944 char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
1945 storeSwapTLVFree(tlv_list);
1946 assert (swap_hdr_sz >= 0);
1947 mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
1948 return result;
1949 }
1950
1951 /**
1952 * Abandon the transient entry our worker has created if neither the shared
1953 * memory cache nor the disk cache wants to store it. Collapsed requests, if
1954 * any, should notice and use Plan B instead of getting stuck waiting for us
1955 * to start swapping the entry out.
1956 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
            mem_obj->xitTable.index >= 0 && // other workers may be interested
            mem_obj->memCache.index < 0 && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}
1969
void
StoreEntry::memOutDecision(const bool)
{
    // the memory cache has decided; the entry may now be unsharable
    transientsAbandonmentCheck();
}
1975
/// records the swapout decision and re-checks entry sharability
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}
1984
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943.  We must not let go any data for IN_MEMORY
     * objects.  We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    // NOTE(review): no null check here — assumes mem_obj is set for all
    // entries reaching this point; confirm against callers
    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
2006
/// whether the entry is newer than the request's If-Modified-Since data;
/// on equal timestamps, compares the request's imslen with the object length
bool
StoreEntry::modifiedSince(HttpRequest * request) const
{
    int object_length;
    time_t mod_time = lastmod;

    // fall back on the caching timestamp when Last-Modified is unknown
    if (mod_time < 0)
        mod_time = timestamp;

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > request->ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < request->ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (request->imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (request->imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}
2046
2047 bool
2048 StoreEntry::hasEtag(ETag &etag) const
2049 {
2050 if (const HttpReply *reply = getReply()) {
2051 etag = reply->header.getETag(HDR_ETAG);
2052 if (etag.str)
2053 return true;
2054 }
2055 return false;
2056 }
2057
2058 bool
2059 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
2060 {
2061 const String reqETags = request.header.getList(HDR_IF_MATCH);
2062 return hasOneOfEtags(reqETags, false);
2063 }
2064
2065 bool
2066 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
2067 {
2068 const String reqETags = request.header.getList(HDR_IF_NONE_MATCH);
2069 // weak comparison is allowed only for HEAD or full-body GET requests
2070 const bool allowWeakMatch = !request.flags.isRanged &&
2071 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
2072 return hasOneOfEtags(reqETags, allowWeakMatch);
2073 }
2074
2075 /// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(HDR_ETAG);
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ','); // no entity ETag: only "*" can match

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // walk the comma-separated request ETag list until one matches
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            // unparseable items simply do not match
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
2102
/// the configured cache_dir this entry is bound to (asserts it has one)
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
2109
/// removes the entry from its cache_dir and forgets the swap location
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
2118
2119 /*
2120 * return true if the entry is in a state where
2121 * it can accept more data (ie with write() method)
2122 */
2123 bool
2124 StoreEntry::isAccepting() const
2125 {
2126 if (STORE_PENDING != store_status)
2127 return false;
2128
2129 if (EBIT_TEST(flags, ENTRY_ABORTED))
2130 return false;
2131
2132 return true;
2133 }
2134
/// one-line debugging summary of entry state, using unique letters
/// for indexes, non-default statuses, and set flags
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    // cache indexes: transients (t), shared memory (m), disk (d@dirn)
    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    // finish with the entry address and lock count
    return os << '/' << &e << '*' << e.locks();
}
2182
2183 /* NullStoreEntry */
2184
// the process-wide singleton returned by getInstance()
NullStoreEntry NullStoreEntry::_instance;

NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2192
char const *
NullStoreEntry::getMD5Text() const
{
    // a null entry has no cache key to report
    return "N/A";
}
2198
void
NullStoreEntry::operator delete(void*)
{
    // the singleton must never be destroyed
    fatal ("Attempt to delete NullStoreEntry\n");
}
2204
char const *
NullStoreEntry::getSerialisedMetaData()
{
    // a null entry has no swap metadata
    return NULL;
}
2210