]> git.ipfire.org Git - thirdparty/squid.git/blob - src/store.cc
Merged from trunk rev.14404
[thirdparty/squid.git] / src / store.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "CacheDigest.h"
13 #include "CacheManager.h"
14 #include "comm/Connection.h"
15 #include "comm/Read.h"
16 #include "ETag.h"
17 #include "event.h"
18 #include "fde.h"
19 #include "globals.h"
20 #include "http.h"
21 #include "HttpReply.h"
22 #include "HttpRequest.h"
23 #include "mem_node.h"
24 #include "MemObject.h"
25 #include "mgr/Registration.h"
26 #include "mgr/StoreIoAction.h"
27 #include "profiler/Profiler.h"
28 #include "repl_modules.h"
29 #include "RequestFlags.h"
30 #include "SquidConfig.h"
31 #include "SquidTime.h"
32 #include "StatCounters.h"
33 #include "stmem.h"
34 #include "Store.h"
35 #include "store_digest.h"
36 #include "store_key_md5.h"
37 #include "store_key_md5.h"
38 #include "store_log.h"
39 #include "store_rebuild.h"
40 #include "StoreClient.h"
41 #include "StoreIOState.h"
42 #include "StoreMeta.h"
43 #include "StrList.h"
44 #include "swap_log_op.h"
45 #include "SwapDir.h"
46 #include "tools.h"
47 #if USE_DELAY_POOLS
48 #include "DelayPools.h"
49 #endif
50
51 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
52 * XXX: convert to MEMPROXY_CLASS() API
53 */
54 #include "mem/Pool.h"
55
56 #include <climits>
57 #include <stack>
58
59 #define REBUILD_TIMESTAMP_DELTA_MAX 2
60
61 #define STORE_IN_MEM_BUCKETS (229)
62
63 /** \todo Convert these string constants to enum string-arrays generated */
64
65 const char *memStatusStr[] = {
66 "NOT_IN_MEMORY",
67 "IN_MEMORY"
68 };
69
70 const char *pingStatusStr[] = {
71 "PING_NONE",
72 "PING_WAITING",
73 "PING_DONE"
74 };
75
76 const char *storeStatusStr[] = {
77 "STORE_OK",
78 "STORE_PENDING"
79 };
80
81 const char *swapStatusStr[] = {
82 "SWAPOUT_NONE",
83 "SWAPOUT_WRITING",
84 "SWAPOUT_DONE"
85 };
86
87 /*
88 * This defines an repl type
89 */
90
91 typedef struct _storerepl_entry storerepl_entry_t;
92
93 struct _storerepl_entry {
94 const char *typestr;
95 REMOVALPOLICYCREATE *create;
96 };
97
98 static storerepl_entry_t *storerepl_list = NULL;
99
100 /*
101 * local function prototypes
102 */
103 static int getKeyCounter(void);
104 static OBJH storeCheckCachableStats;
105 static EVH storeLateRelease;
106
107 /*
108 * local variables
109 */
110 static std::stack<StoreEntry*> LateReleaseStack;
111 MemAllocator *StoreEntry::pool = NULL;
112
113 StorePointer Store::CurrentRoot = NULL;
114
/// replaces the process-wide root storage pointer
void
Store::Root(Store * aRoot)
{
    CurrentRoot = aRoot;
}

/// convenience overload: unwraps the ref-counted pointer and delegates
void
Store::Root(StorePointer aRoot)
{
    Root(aRoot.getRaw());
}

/// appends the root store's statistics report to the given entry
void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

/// no-op in the base class; concrete stores override as needed
void
Store::create()
{}

/// no-op in the base class; concrete stores override as needed
void
Store::diskFull()
{}

/// no-op in the base class; concrete stores override as needed
void
Store::sync()
{}

/// base Store cannot unlink entries; reaching this indicates a logic error
void
Store::unlink(StoreEntry &)
{
    fatal("Store::unlink on invalid Store\n");
}
151
152 void
153 StoreEntry::makePublic()
154 {
155 /* This object can be cached for a long time */
156
157 if (!EBIT_TEST(flags, RELEASE_REQUEST))
158 setPublicKey();
159 }
160
161 void
162 StoreEntry::makePrivate()
163 {
164 /* This object should never be cached at all */
165 expireNow();
166 releaseRequest(); /* delete object when not used */
167 }
168
169 void
170 StoreEntry::cacheNegatively()
171 {
172 /* This object may be negatively cached */
173 negativeCache();
174 makePublic();
175 }
176
177 size_t
178 StoreEntry::inUseCount()
179 {
180 if (!pool)
181 return 0;
182 return pool->getInUseCount();
183 }
184
185 const char *
186 StoreEntry::getMD5Text() const
187 {
188 return storeKeyText((const cache_key *)key);
189 }
190
191 #include "comm.h"
192
193 void
194 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
195 {
196 StoreEntry *anEntry = (StoreEntry *)theContext;
197 anEntry->delayAwareRead(aRead.conn,
198 aRead.buf,
199 aRead.len,
200 aRead.callback);
201 }
202
/// Schedules a network read of at most `len` bytes into `buf`, honouring
/// both the read-ahead gap and (when built with delay pools) bandwidth
/// limits. If no bytes may be read now, the read is queued for later
/// resumption via DeferReader rather than issued.
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    if (amountToRead == 0) {
        assert (mem_obj);
        /* read ahead limit */
        /* Perhaps these two calls should both live in MemObject */
#if USE_DELAY_POOLS
        if (!mem_obj->readAheadPolicyCanRead()) {
#endif
            // blocked by the read-ahead gap: park the read on the MemObject
            mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
            return;
#if USE_DELAY_POOLS
        }

        /* delay id limit */
        // blocked by a delay pool: park the read on the relevant delay id
        mem_obj->mostBytesAllowed().delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;

#endif

    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delyaAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, HERE << "wont read from closing " << conn << " for " <<
               callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
243
244 size_t
245 StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
246 {
247 if (mem_obj == NULL)
248 return aRange.end;
249
250 #if URL_CHECKSUM_DEBUG
251
252 mem_obj->checkUrlChecksum();
253
254 #endif
255
256 if (!mem_obj->readAheadPolicyCanRead())
257 return 0;
258
259 return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
260 }
261
262 bool
263 StoreEntry::checkDeferRead(int) const
264 {
265 return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
266 }
267
268 void
269 StoreEntry::setNoDelay(bool const newValue)
270 {
271 if (mem_obj)
272 mem_obj->setNoDelay(newValue);
273 }
274
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decides whether a new store client should be served from memory or via
/// the swapin (disk) path, based on how much of the object is in memory
/// and the entry's store/swap status.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // the beginning of the object is no longer in memory
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swap_status == SWAPOUT_DONE) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
340
/// Constructs a shell entry: no MemObject, invalid (-1) timestamps, not on
/// disk (swap_filen/swap_dirn of -1), pending store status, and no locks.
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastmod(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
365
#if USE_ADAPTATION
/// Remembers a producer call to resume later via kickProducer(). Only the
/// first deferred producer is kept; subsequent requests are logged and
/// ignored.
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, HERE << "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

/// schedules the previously deferred producer call (if any) and forgets it
void
StoreEntry::kickProducer()
{
    if (deferredProducer != NULL) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = NULL;
    }
}
#endif
386
/// Destroys this entry's MemObject, first disconnecting it from the
/// transients and shared-memory cache indexes (unless Squid is shutting
/// down, when Store::Root() is no longer available).
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, HERE << "destroyMemObject " << mem_obj);

    if (MemObject *mem = mem_obj) {
        // Store::Root() is FATALly missing during shutdown
        if (mem->xitTable.index >= 0 && !shutting_down)
            Store::Root().transientsDisconnect(*mem);
        if (mem->memCache.index >= 0 && !shutting_down)
            Store::Root().memoryDisconnect(*this);

        setMemStatus(NOT_IN_MEMORY);
        // clear the member before delete so the entry never points at freed memory
        mem_obj = NULL;
        delete mem;
    }
}
404
/// hash-table destructor callback: fully tears down a StoreEntry —
/// disconnects any swap file, destroys the MemObject, removes the hash key,
/// and deletes the entry (the shared NullStoreEntry is never destroyed)
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->swap_filen >= 0 && !shutting_down) {
        SwapDir &sd = dynamic_cast<SwapDir&>(*e->store());
        sd.disconnect(*e);
    }

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have released the key before we can delete the entry
    assert(e->key == NULL);

    delete e;
}
429
430 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
431
/// stores a private copy of someKey and indexes this entry in store_table
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

/// removes this entry from store_table and frees its key copy (no-op when
/// the entry was never hashInsert()ed)
void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = NULL;
    }
}
449
450 /* -------------------------------------------------------------------------- */
451
/* get rid of memory copy of the object */
/// Drops the in-memory copy of the object. If the object was never fully
/// swapped out, no usable copy remains anywhere, so the entry itself is
/// released.
void
StoreEntry::purgeMem()
{
    if (mem_obj == NULL)
        return;

    debugs(20, 3, "StoreEntry::purgeMem: Freeing memory-copy of " << getMD5Text());

    Store::Root().memoryUnlink(*this);

    if (swap_status != SWAPOUT_DONE)
        release();
}
466
/// adds one reference; `context` identifies the caller for debugging only
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

/// records a fresh use of the entry and informs the replacement policy
void
StoreEntry::touch()
{
    lastref = squid_curtime;
    Store::Root().reference(*this);
}
480
/// marks the entry for eventual removal and tells all caches/workers to
/// stop sharing it; idempotent
void
StoreEntry::setReleaseFlag()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    debugs(20, 3, "StoreEntry::setReleaseFlag: '" << getMD5Text() << "'");

    EBIT_SET(flags, RELEASE_REQUEST);

    Store::Root().markForUnlink(*this);
}

/// requests eventual deletion: sets the release flag and switches the entry
/// to a private key so no new clients can find it; idempotent
void
StoreEntry::releaseRequest()
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setReleaseFlag(); // makes validToSend() false, preventing future hits

    setPrivateKey();
}
504
/// Drops one reference. When the last reference goes away, the entry is
/// either released (if marked for release or still pending) or handed to
/// the store as idle — which may delete it. Returns the remaining lock
/// count.
int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    // an entry still pending with no users will never complete; drop it
    if (store_status == STORE_PENDING)
        setReleaseFlag();

    assert(storePendingNClients(this) == 0);

    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        this->release();
        return 0;
    }

    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
    return 0;
}
532
533 void
534 StoreEntry::getPublicByRequestMethod (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
535 {
536 assert (aClient);
537 StoreEntry *result = storeGetPublicByRequestMethod( request, method);
538
539 if (!result)
540 aClient->created (NullStoreEntry::getInstance());
541 else
542 aClient->created (result);
543 }
544
545 void
546 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
547 {
548 assert (aClient);
549 StoreEntry *result = storeGetPublicByRequest (request);
550
551 if (!result)
552 result = NullStoreEntry::getInstance();
553
554 aClient->created (result);
555 }
556
557 void
558 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
559 {
560 assert (aClient);
561 StoreEntry *result = storeGetPublic (uri, method);
562
563 if (!result)
564 result = NullStoreEntry::getInstance();
565
566 aClient->created (result);
567 }
568
569 StoreEntry *
570 storeGetPublic(const char *uri, const HttpRequestMethod& method)
571 {
572 return Store::Root().get(storeKeyPublic(uri, method));
573 }
574
575 StoreEntry *
576 storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method)
577 {
578 return Store::Root().get(storeKeyPublicByRequestMethod(req, method));
579 }
580
581 StoreEntry *
582 storeGetPublicByRequest(HttpRequest * req)
583 {
584 StoreEntry *e = storeGetPublicByRequestMethod(req, req->method);
585
586 if (e == NULL && req->method == Http::METHOD_HEAD)
587 /* We can generate a HEAD reply from a cached GET object */
588 e = storeGetPublicByRequestMethod(req, Http::METHOD_GET);
589
590 return e;
591 }
592
/// returns the next value of a monotonically increasing counter used to
/// make private cache keys unique; restarts at 1 on signed wraparound
static int
getKeyCounter(void)
{
    static int counter = 0;

    ++counter;

    if (counter < 0) // wrapped past INT_MAX; restart the sequence
        counter = 1;

    return counter;
}
603
604 /* RBC 20050104 AFAICT this should become simpler:
605 * rather than reinserting with a special key it should be marked
606 * as 'released' and then cleaned up when refcounting indicates.
607 * the StoreHashIndex could well implement its 'released' in the
608 * current manner.
609 * Also, clean log writing should skip over ia,t
610 * Otherwise, we need a 'remove from the index but not the store
611 * concept'.
612 */
/// Re-keys the entry with a unique private key so no other client can find
/// it. An already-public entry is first marked for release, logged as
/// deleted in the swap log (if on disk), and removed from the hash table.
void
StoreEntry::setPrivateKey()
{
    const cache_key *newkey;

    if (key && EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already private */

    if (key) {
        setReleaseFlag(); // will markForUnlink(); all caches/workers will know

        // TODO: move into SwapDir::markForUnlink() already called by Root()
        if (swap_filen > -1)
            storeDirSwapLog(this, SWAP_LOG_DEL);

        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris()) {
        // key on the entry's own URI plus a unique id
        mem_obj->id = getKeyCounter();
        newkey = storeKeyPrivate(mem_obj->storeId(), mem_obj->method, mem_obj->id);
    } else {
        // no URIs to key on; a unique counter value alone keeps keys distinct
        newkey = storeKeyPrivate("JUNK", Http::METHOD_NONE, getKeyCounter());
    }

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    hashInsert(newkey);
}
642
/// Re-keys the entry with a shared (public) key so future requests can hit
/// it. Handles Vary bookkeeping: refreshes the request's variance mark,
/// kills a stale base object when the variance changed, and creates an
/// internal "vary marker" base object when none exists yet. Any existing
/// entry already holding the public key is made private and released.
void
StoreEntry::setPublicKey()
{
    const cache_key *newkey;

    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return; /* is already public */

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    if (mem_obj->request) {
        HttpRequest *request = mem_obj->request;

        if (!mem_obj->vary_headers) {
            /* First handle the case where the object no longer varies */
            safe_free(request->vary_headers);
        } else {
            if (request->vary_headers && strcmp(request->vary_headers, mem_obj->vary_headers) != 0) {
                /* Oops.. the variance has changed. Kill the base object
                 * to record the new variance key
                 */
                safe_free(request->vary_headers); /* free old "bad" variance key */
                if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                    pe->release();
            }

            /* Make sure the request knows the variance status */
            if (!request->vary_headers) {
                const char *vary = httpMakeVaryMark(request, mem_obj->getReply());

                if (vary)
                    request->vary_headers = xstrdup(vary);
            }
        }

        // TODO: storeGetPublic() calls below may create unlocked entries.
        // We should add/use storeHas() API or lock/unlock those entries.
        if (mem_obj->vary_headers && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
            /* Create "vary" base object */
            String vary;
            StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
            /* We are allowed to do this typecast */
            HttpReply *rep = new HttpReply;
            rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
            vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

            if (vary.size()) {
                /* Again, we own this structure layout */
                rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
                vary.clean();
            }

#if X_ACCELERATOR_VARY
            vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

            if (vary.size() > 0) {
                /* Again, we own this structure layout */
                rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
                vary.clean();
            }

#endif
            pe->replaceHttpReply(rep, false); // no write until key is public

            pe->timestampsSet();

            pe->makePublic();

            pe->startWriting(); // after makePublic()

            pe->complete();

            pe->unlock("StoreEntry::setPublicKey+Vary");
        }

        newkey = storeKeyPublicByRequest(mem_obj->request);
    } else
        newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);

    // evict any previous occupant of the public key, then recompute the key
    // (the eviction above may have changed request state feeding the key)
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        debugs(20, 3, "Making old " << *e2 << " private.");
        e2->setPrivateKey();
        e2->release();

        if (mem_obj->request)
            newkey = storeKeyPublicByRequest(mem_obj->request);
        else
            newkey = storeKeyPublic(mem_obj->storeId(), mem_obj->method);
    }

    if (key)
        hashDelete();

    EBIT_CLR(flags, KEY_PRIVATE);

    hashInsert(newkey);

    if (swap_filen > -1)
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
761
762 StoreEntry *
763 storeCreatePureEntry(const char *url, const char *log_url, const RequestFlags &flags, const HttpRequestMethod& method)
764 {
765 StoreEntry *e = NULL;
766 debugs(20, 3, "storeCreateEntry: '" << url << "'");
767
768 e = new StoreEntry();
769 e->makeMemObject();
770 e->mem_obj->setUris(url, log_url, method);
771
772 if (flags.cachable) {
773 EBIT_CLR(e->flags, RELEASE_REQUEST);
774 } else {
775 e->releaseRequest();
776 }
777
778 e->store_status = STORE_PENDING;
779 e->refcount = 0;
780 e->lastref = squid_curtime;
781 e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
782 e->ping_status = PING_NONE;
783 EBIT_SET(e->flags, ENTRY_VALIDATED);
784 return e;
785 }
786
787 StoreEntry *
788 storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
789 {
790 StoreEntry *e = storeCreatePureEntry(url, logUrl, flags, method);
791 e->lock("storeCreateEntry");
792
793 if (neighbors_do_private_keys || !flags.hierarchical)
794 e->setPrivateKey();
795 else
796 e->setPublicKey();
797
798 return e;
799 }
800
/* Mark object as expired */
/// forces the entry to be considered stale from this moment on
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
808
/// Appends writeBuffer's bytes to the entry's MemObject, converting the
/// caller's content-relative offset to a storage offset (which includes the
/// reply headers), and wakes waiting clients unless sending is delayed.
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length); // may purge other objects from memory
    mem_obj->write(writeBuffer);

    if (!EBIT_TEST(flags, DELAY_SENDING))
        invokeHandlers();
}
829
/* Append incoming data from a primary server to an entry. */
/// Appends len bytes from buf at the entry's current end. The offset passed
/// to write() is content-relative, so the header size is subtracted here
/// and re-added inside write().
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
848
849 void
850 StoreEntry::vappendf(const char *fmt, va_list vargs)
851 {
852 LOCAL_ARRAY(char, buf, 4096);
853 *buf = 0;
854 int x;
855
856 #ifdef VA_COPY
857 va_args ap;
858 /* Fix of bug 753r. The value of vargs is undefined
859 * after vsnprintf() returns. Make a copy of vargs
860 * incase we loop around and call vsnprintf() again.
861 */
862 VA_COPY(ap,vargs);
863 errno = 0;
864 if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
865 fatal(xstrerr(errno));
866 return;
867 }
868 va_end(ap);
869 #else /* VA_COPY */
870 errno = 0;
871 if ((x = vsnprintf(buf, sizeof(buf), fmt, vargs)) < 0) {
872 fatal(xstrerr(errno));
873 return;
874 }
875 #endif /*VA_COPY*/
876
877 if (x < static_cast<int>(sizeof(buf))) {
878 append(buf, x);
879 return;
880 }
881
882 // okay, do it the slow way.
883 char *buf2 = new char[x+1];
884 int y = vsnprintf(buf2, x+1, fmt, vargs);
885 assert(y >= 0 && y == x);
886 append(buf2, y);
887 delete[] buf2;
888 }
889
// deprecated. use StoreEntry::appendf() instead.
/// printf-style append to a store entry (variadic wrapper over vappendf)
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}

// deprecated. use StoreEntry::appendf() instead.
/// vprintf-style append to a store entry
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
906
/// tallies of StoreEntry::checkCachable() outcomes, keyed by the reason an
/// entry was rejected (or accepted); reported by storeCheckCachableStats()
struct _store_check_cachable_hist {

    struct {
        int non_get;              // non-GET method (only with CACHE_ALL_METHODS)
        int not_entry_cachable;   // entry already marked RELEASE_REQUEST
        int wrong_content_length; // completed with ENTRY_BAD_LENGTH
        int negative_cached;      // ENTRY_NEGCACHED entries
        int too_big;              // exceeds store_maxobjsize
        int too_small;            // below minObjectSize
        int private_key;          // still keyed privately
        int too_many_open_files;  // disk FD limit reached
        int too_many_open_fds;    // global FD reserve exhausted
        int missing_parts;        // no MemObject or reply to inspect
    } no;

    struct {
        int Default;              // passed every check
    } yes;
} store_check_cachable_hist;
926
927 int
928 storeTooManyDiskFilesOpen(void)
929 {
930 if (Config.max_open_disk_fds == 0)
931 return 0;
932
933 if (store_open_disk_fd > Config.max_open_disk_fds)
934 return 1;
935
936 return 0;
937 }
938
/// whether the object is below the configured minimum cachable size;
/// special (internal) entries are exempt
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    // for completed objects, judge by the actual accumulated size
    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    // otherwise (or additionally), judge by the advertised Content-Length
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
954
955 bool
956 StoreEntry::checkTooBig() const
957 {
958 if (mem_obj->endOffset() > store_maxobjsize)
959 return true;
960
961 if (getReply()->content_length < 0)
962 return false;
963
964 return (getReply()->content_length > store_maxobjsize);
965 }
966
// TODO: move "too many open..." checks outside -- we are called too early/late
/// Decides whether this entry may be cached, updating the
/// store_check_cachable_hist counters with the reason. A negative verdict
/// (other than the early-return cases) also marks the entry for release.
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (EBIT_TEST(flags, ENTRY_NEGCACHED)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: negative cached");
            ++store_check_cachable_hist.no.negative_cached;
            return 0; /* avoid release call below */
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (swap_status != SWAPOUT_NONE) {
            /*
             * here we checked the swap_status because the remaining
             * cases are only relevant only if we haven't started swapping
             * out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    releaseRequest();
    return 0;
}
1033
/// cache manager report: dumps the checkCachable() outcome counters
void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");

#if CACHE_ALL_METHODS

    storeAppendPrintf(sentry, "no.non_get\t%d\n",
                      store_check_cachable_hist.no.non_get);
#endif

    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      store_check_cachable_hist.no.negative_cached);
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}
1066
/// flags the entry as having an inconsistent length (for the given reason)
/// and marks it for release so it cannot be served or cached
void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}
1074
/// Finalizes a fully-received entry: records the final object size, flips
/// the status to STORE_OK, validates the length against the headers, and
/// notifies waiting clients. Safe to call on an aborted entry (no-op).
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1115
/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
/// Aborts a pending transfer: negatively caches and releases the entry,
/// notifies the server side via the registered abort callback (if any),
/// wakes the client side, and invalidates any partial swap-out.
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    setMemStatus(NOT_IN_MEMORY);

    // aborted entries are "complete" as far as status transitions go
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}
1168
1169 /**
1170 * Clear Memory storage to accommodate the given object len
1171 */
1172 void
1173 storeGetMemSpace(int size)
1174 {
1175 PROF_start(storeGetMemSpace);
1176 StoreEntry *e = NULL;
1177 int released = 0;
1178 static time_t last_check = 0;
1179 size_t pages_needed;
1180 RemovalPurgeWalker *walker;
1181
1182 if (squid_curtime == last_check) {
1183 PROF_stop(storeGetMemSpace);
1184 return;
1185 }
1186
1187 last_check = squid_curtime;
1188
1189 pages_needed = (size + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
1190
1191 if (mem_node::InUseCount() + pages_needed < store_pages_max) {
1192 PROF_stop(storeGetMemSpace);
1193 return;
1194 }
1195
1196 debugs(20, 2, "storeGetMemSpace: Starting, need " << pages_needed <<
1197 " pages");
1198
1199 /* XXX what to set as max_scan here? */
1200 walker = mem_policy->PurgeInit(mem_policy, 100000);
1201
1202 while ((e = walker->Next(walker))) {
1203 e->purgeMem();
1204 ++released;
1205
1206 if (mem_node::InUseCount() + pages_needed < store_pages_max)
1207 break;
1208 }
1209
1210 walker->Done(walker);
1211 debugs(20, 3, "storeGetMemSpace stats:");
1212 debugs(20, 3, " " << std::setw(6) << hot_obj_count << " HOT objects");
1213 debugs(20, 3, " " << std::setw(6) << released << " were released");
1214 PROF_stop(storeGetMemSpace);
1215 }
1216
/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *)
{
    // delegate the actual maintenance to the root store
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    // one-second period: this event keeps itself alive for the process lifetime
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}
1231
1232 /* The maximum objects to scan for maintain storage space */
1233 #define MAINTAIN_MAX_SCAN 1024
1234 #define MAINTAIN_MAX_REMOVE 64
1235
1236 /*
1237 * This routine is to be called by main loop in main.c.
1238 * It removes expired objects on only one bucket for each time called.
1239 *
1240 * This should get called 1/s from main().
1241 */
1242 void
1243 StoreController::maintain()
1244 {
1245 static time_t last_warn_time = 0;
1246
1247 PROF_start(storeMaintainSwapSpace);
1248 swapDir->maintain();
1249
1250 /* this should be emitted by the oversize dir, not globally */
1251
1252 if (Store::Root().currentSize() > Store::Root().maxSize()) {
1253 if (squid_curtime - last_warn_time > 10) {
1254 debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
1255 << Store::Root().currentSize() / 1024.0 << " KB > "
1256 << (Store::Root().maxSize() >> 10) << " KB");
1257 last_warn_time = squid_curtime;
1258 }
1259 }
1260
1261 PROF_stop(storeMaintainSwapSpace);
1262 }
1263
/* release an object from a cache */
void
StoreEntry::release()
{
    PROF_start(storeRelease);
    debugs(20, 3, "releasing " << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        expireNow();
        debugs(20, 3, "storeRelease: Only setting RELEASE_REQUEST bit");
        releaseRequest();
        PROF_stop(storeRelease);
        return;
    }

    // drop any in-memory copy right away
    Store::Root().memoryUnlink(*this);

    // while the disk index is still rebuilding, a disk-backed entry cannot be
    // destroyed safely; defer it (see storeLateRelease) instead
    if (StoreController::store_dirs_rebuilding && swap_filen > -1) {
        setPrivateKey();

        if (swap_filen > -1) {
            // lock the entry until rebuilding is done
            lock("storeLateRelease");
            setReleaseFlag();
            LateReleaseStack.push(this);
        } else {
            destroyStoreEntry(static_cast<hash_link *>(this));
            // "this" is no longer valid
        }

        PROF_stop(storeRelease);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);

    if (swap_filen > -1) {
        // log before unlink() below clears swap_filen
        if (!EBIT_TEST(flags, KEY_PRIVATE))
            storeDirSwapLog(this, SWAP_LOG_DEL);

        unlink();
    }

    // destroys "this"; no member access is valid past this point
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1313
1314 static void
1315 storeLateRelease(void *)
1316 {
1317 StoreEntry *e;
1318 static int n = 0;
1319
1320 if (StoreController::store_dirs_rebuilding) {
1321 eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1322 return;
1323 }
1324
1325 // TODO: this works but looks unelegant.
1326 for (int i = 0; i < 10; ++i) {
1327 if (LateReleaseStack.empty()) {
1328 debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1329 return;
1330 } else {
1331 e = LateReleaseStack.top();
1332 LateReleaseStack.pop();
1333 }
1334
1335 e->unlock("storeLateRelease");
1336 ++n;
1337 }
1338
1339 eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1340 }
1341
1342 /* return 1 if a store entry is locked */
1343 int
1344 StoreEntry::locked() const
1345 {
1346 if (lock_count)
1347 return 1;
1348
1349 /*
1350 * SPECIAL, PUBLIC entries should be "locked";
1351 * XXX: Their owner should lock them then instead of relying on this hack.
1352 */
1353 if (EBIT_TEST(flags, ENTRY_SPECIAL))
1354 if (!EBIT_TEST(flags, KEY_PRIVATE))
1355 return 1;
1356
1357 return 0;
1358 }
1359
1360 bool
1361 StoreEntry::validLength() const
1362 {
1363 int64_t diff;
1364 const HttpReply *reply;
1365 assert(mem_obj != NULL);
1366 reply = getReply();
1367 debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1368 debugs(20, 5, "storeEntryValidLength: object_len = " <<
1369 objectLen());
1370 debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
1371 debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1372
1373 if (reply->content_length < 0) {
1374 debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1375 return 1;
1376 }
1377
1378 if (reply->hdr_sz == 0) {
1379 debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1380 return 1;
1381 }
1382
1383 if (mem_obj->method == Http::METHOD_HEAD) {
1384 debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1385 return 1;
1386 }
1387
1388 if (reply->sline.status() == Http::scNotModified)
1389 return 1;
1390
1391 if (reply->sline.status() == Http::scNoContent)
1392 return 1;
1393
1394 diff = reply->hdr_sz + reply->content_length - objectLen();
1395
1396 if (diff == 0)
1397 return 1;
1398
1399 debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1400
1401 return 0;
1402 }
1403
/// registers this module's cache manager ("mgr:") reports
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1412
/// initializes the storage manager: hash keys, memory replacement policy,
/// cache digest, store log, the store hierarchy, and the index rebuild.
/// Call order matters; e.g. the policy must exist before Root().init().
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // schedule the lazy release of entries deferred during the rebuild
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1426
1427 /// computes maximum size of a cachable object
1428 /// larger objects are rejected by all (disk and memory) cache stores
1429 static int64_t
1430 storeCalcMaxObjSize()
1431 {
1432 int64_t ms = 0; // nothing can be cached without at least one store consent
1433
1434 // global maximum is at least the disk store maximum
1435 for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
1436 assert (Config.cacheSwap.swapDirs[i].getRaw());
1437 const int64_t storeMax = dynamic_cast<SwapDir *>(Config.cacheSwap.swapDirs[i].getRaw())->maxObjectSize();
1438 if (ms < storeMax)
1439 ms = storeMax;
1440 }
1441
1442 // global maximum is at least the memory store maximum
1443 // TODO: move this into a memory cache class when we have one
1444 const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
1445 if (ms < memMax)
1446 ms = memMax;
1447
1448 return ms;
1449 }
1450
1451 void
1452 storeConfigure(void)
1453 {
1454 store_swap_high = (long) (((float) Store::Root().maxSize() *
1455 (float) Config.Swap.highWaterMark) / (float) 100);
1456 store_swap_low = (long) (((float) Store::Root().maxSize() *
1457 (float) Config.Swap.lowWaterMark) / (float) 100);
1458 store_pages_max = Config.memMaxSize / sizeof(mem_node);
1459
1460 store_maxobjsize = storeCalcMaxObjSize();
1461 }
1462
1463 bool
1464 StoreEntry::memoryCachable()
1465 {
1466 if (!checkCachable())
1467 return 0;
1468
1469 if (mem_obj == NULL)
1470 return 0;
1471
1472 if (mem_obj->data_hdr.size() == 0)
1473 return 0;
1474
1475 if (mem_obj->inmem_lo != 0)
1476 return 0;
1477
1478 if (!Config.onoff.memory_cache_first && swap_status == SWAPOUT_DONE && refcount == 1)
1479 return 0;
1480
1481 return 1;
1482 }
1483
1484 int
1485 StoreEntry::checkNegativeHit() const
1486 {
1487 if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1488 return 0;
1489
1490 if (expires <= squid_curtime)
1491 return 0;
1492
1493 if (store_status != STORE_OK)
1494 return 0;
1495
1496 return 1;
1497 }
1498
/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In absence of proper expiry info it will set to expire immediately,
 * or with HTTP-violations enabled the configured negative-TTL is observed
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    // NOTE: exactly one of the #if branches below forms the body of this if
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    EBIT_SET(flags, ENTRY_NEGCACHED);
}
1518
/// tears down store-module state at shutdown: replaces (and thereby frees)
/// the root store and destroys the cache digest
void
storeFreeMemory(void)
{
    Store::Root(NULL); // setter form: swaps in a NULL root store
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}
1528
1529 int
1530 expiresMoreThan(time_t expires, time_t when)
1531 {
1532 if (expires < 0) /* No Expires given */
1533 return 1;
1534
1535 return (expires > (squid_curtime + when));
1536 }
1537
/// whether this entry may be served to a client right now;
/// the check order below is significant (cheapest disqualifiers first)
int
StoreEntry::validToSend() const
{
    // already marked for deletion: never serve
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // expired negatively-cached entries are not servable
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (swap_filen > -1) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
1577
/// derives this entry's timestamp, expires, and lastmod fields from the
/// stored reply headers, correcting for clock skew and network delays
void
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        const time_t request_sent =
            mem_obj->request->hier.peer_http_request_sent.tv_sec;
        if (0 < request_sent && request_sent < squid_curtime)
            served_date -= (squid_curtime - request_sent);
    }

    // freshness lifetime is Expires relative to the (corrected) Date
    if (reply->expires > 0 && reply->date > -1)
        expires = served_date + (reply->expires - reply->date);
    else
        expires = reply->expires;

    lastmod = reply->last_modified;

    timestamp = served_date;
}
1625
/// registers a callback to run if this entry gets aborted;
/// only one abort callback may be registered at a time
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data); // take a cbdata reference
}
1634
1635 void
1636 StoreEntry::unregisterAbort()
1637 {
1638 assert(mem_obj);
1639 if (mem_obj->abort.callback) {
1640 mem_obj->abort.callback = NULL;
1641 cbdataReferenceDone(mem_obj->abort.data);
1642 }
1643 }
1644
/// dumps all entry fields at debug level `l` for troubleshooting
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastmod: " << lastmod);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1666
/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // ENTRY_SPECIAL entries are kept out of the replacement policy
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1711
1712 const char *
1713 StoreEntry::url() const
1714 {
1715 if (mem_obj == NULL)
1716 return "[null_mem_obj]";
1717 else
1718 return mem_obj->storeId();
1719 }
1720
1721 MemObject *
1722 StoreEntry::makeMemObject()
1723 {
1724 if (!mem_obj)
1725 mem_obj = new MemObject();
1726 return mem_obj;
1727 }
1728
/// creates the MemObject (if needed) and sets its store/log URIs and method
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    makeMemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1735
/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1745
1746 /** flush any buffered content.
1747 *
1748 * This just clears DELAY_SENDING and Invokes the handlers
1749 * to begin sending anything that may be buffered.
1750 */
1751 void
1752 StoreEntry::flush()
1753 {
1754 if (EBIT_TEST(flags, DELAY_SENDING)) {
1755 EBIT_CLR(flags, DELAY_SENDING);
1756 invokeHandlers();
1757 }
1758 }
1759
/// total stored object size in bytes (headers + body); requires a MemObject
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1766
/// stored body size in bytes: objectLen() minus the reply header size
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1774
1775 HttpReply const *
1776 StoreEntry::getReply () const
1777 {
1778 if (NULL == mem_obj)
1779 return NULL;
1780
1781 return mem_obj->getReply();
1782 }
1783
1784 void
1785 StoreEntry::reset()
1786 {
1787 assert (mem_obj);
1788 debugs(20, 3, "StoreEntry::reset: " << url());
1789 mem_obj->reset();
1790 HttpReply *rep = (HttpReply *) getReply(); // bypass const
1791 rep->reset();
1792 expires = lastmod = timestamp = -1;
1793 }
1794
/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once its working.)
 */
void
storeFsInit(void)
{
    storeReplSetup();
}
1807
1808 /*
1809 * called to add another store removal policy module
1810 */
1811 void
1812 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1813 {
1814 int i;
1815
1816 /* find the number of currently known repl types */
1817 for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1818 if (strcmp(storerepl_list[i].typestr, type) == 0) {
1819 debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1820 return;
1821 }
1822 }
1823
1824 /* add the new type */
1825 storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1826
1827 memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1828
1829 storerepl_list[i].typestr = type;
1830
1831 storerepl_list[i].create = create;
1832 }
1833
1834 /*
1835 * Create a removal policy instance
1836 */
1837 RemovalPolicy *
1838 createRemovalPolicy(RemovalPolicySettings * settings)
1839 {
1840 storerepl_entry_t *r;
1841
1842 for (r = storerepl_list; r && r->typestr; ++r) {
1843 if (strcmp(r->typestr, settings->type) == 0)
1844 return r->create(settings->args);
1845 }
1846
1847 debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1848 debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1849 debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
1850 fatalf("ERROR: Unknown policy %s\n", settings->type);
1851 return NULL; /* NOTREACHED */
1852 }
1853
#if 0
/* DISABLED: dead code kept for reference only (never compiled).
 * Legacy swap file number bookkeeping from before the SwapDir API. */
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1874
/// fills this entry with the given error reply, completes it, and marks it
/// negatively cached and released; the call sequence below is significant
void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest();
    unlock("StoreEntry::storeErrorResponse");
}
1887
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    // without a MemObject there is nowhere to put the reply
    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep); // takes ownership of rep

    if (andStartWriting)
        startWriting();
}
1907
/// packs the stored reply headers (and any reply body) into this
/// currently-empty entry and begins delivery to waiting clients
void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    // buffer() .. flush() brackets the writes so clients see a complete header
    buffer();
    rep->packHeadersInto(this);
    mem_obj->markEndOfReplyHeaders();
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    rep->body.packInto(this);
    flush();
}
1929
/// packs this entry's swap metadata TLVs into a newly allocated buffer and
/// records the packed size in mem_obj->swap_hdr_sz; the caller owns the
/// returned buffer
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list); // the TLV list is no longer needed once packed
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1941
/**
 * Abandon the transient entry our worker has created if neither the shared
 * memory cache nor the disk cache wants to store it. Collapsed requests, if
 * any, should notice and use Plan B instead of getting stuck waiting for us
 * to start swapping the entry out.
 */
void
StoreEntry::transientsAbandonmentCheck()
{
    if (mem_obj && !mem_obj->smpCollapsed && // this worker is responsible
            mem_obj->xitTable.index >= 0 && // other workers may be interested
            mem_obj->memCache.index < 0 && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().transientsAbandon(*this);
    }
}
1960
/// called when the memory-cache-in decision is made; may abandon the
/// transient entry if no cache wants it (see transientsAbandonmentCheck)
void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}
1966
/// records the swap-out decision and re-evaluates transient abandonment
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}
1975
/// releases in-memory object data that is no longer needed, either
/// preserving bytes still required for swap-out or trimming freely
void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    // NOTE(review): mem_obj is dereferenced without a null check below;
    // presumably callers guarantee a MemObject here -- confirm.
    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}
1997
/// whether this cached entry should be considered modified relative to the
/// client's If-Modified-Since timestamp (and optional length)
bool
StoreEntry::modifiedSince(HttpRequest * request) const
{
    int object_length;
    time_t mod_time = lastmod;

    // fall back on the response timestamp when Last-Modified is unknown
    if (mod_time < 0)
        mod_time = timestamp;

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    // no usable timestamp at all: treat as modified
    if (mod_time < 0)
        return true;

    /* Find size of the object */
    object_length = getReply()->content_length;

    if (object_length < 0)
        object_length = contentLen();

    if (mod_time > request->ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < request->ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else if (request->imslen < 0) {
        debugs(88, 3, "--> NO: same LMT, no client length");
        return false;
    } else if (request->imslen == object_length) {
        debugs(88, 3, "--> NO: same LMT, same length");
        return false;
    } else {
        debugs(88, 3, "--> YES: same LMT, different length");
        return true;
    }
}
2037
2038 bool
2039 StoreEntry::hasEtag(ETag &etag) const
2040 {
2041 if (const HttpReply *reply = getReply()) {
2042 etag = reply->header.getETag(Http::HdrType::ETAG);
2043 if (etag.str)
2044 return true;
2045 }
2046 return false;
2047 }
2048
2049 bool
2050 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
2051 {
2052 const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
2053 return hasOneOfEtags(reqETags, false);
2054 }
2055
2056 bool
2057 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
2058 {
2059 const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
2060 // weak comparison is allowed only for HEAD or full-body GET requests
2061 const bool allowWeakMatch = !request.flags.isRanged &&
2062 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
2063 return hasOneOfEtags(reqETags, allowWeakMatch);
2064 }
2065
/// whether at least one of the request ETags matches entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
    // a reply without an ETag can only match the "*" wildcard
    if (!repETag.str)
        return strListIsMember(&reqETags, "*", ',');

    bool matched = false;
    const char *pos = NULL;
    const char *item;
    int ilen;
    // walk the comma-separated request ETag list until a match is found
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            // weak vs strong comparison per the caller's allowWeakMatch flag
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
2093
/// the SwapDir this entry is stored in; requires a valid swap_dirn
SwapDir::Pointer
StoreEntry::store() const
{
    assert(0 <= swap_dirn && swap_dirn < Config.cacheSwap.n_configured);
    return INDEXSD(swap_dirn);
}
2100
/// removes this entry's object from its SwapDir and clears the
/// entry's swap location/state fields
void
StoreEntry::unlink()
{
    store()->unlink(*this); // implies disconnect()
    swap_filen = -1;
    swap_dirn = -1;
    swap_status = SWAPOUT_NONE;
}
2109
2110 /*
2111 * return true if the entry is in a state where
2112 * it can accept more data (ie with write() method)
2113 */
2114 bool
2115 StoreEntry::isAccepting() const
2116 {
2117 if (STORE_PENDING != store_status)
2118 return false;
2119
2120 if (EBIT_TEST(flags, ENTRY_ABORTED))
2121 return false;
2122
2123 return true;
2124 }
2125
/// prints a compact debugs()-friendly summary of the entry: cache indexes,
/// non-default status letters, set flag letters, address, and lock count
std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.mem_obj) {
        if (e.mem_obj->xitTable.index > -1)
            os << 't' << e.mem_obj->xitTable.index;
        if (e.mem_obj->memCache.index > -1)
            os << 'm' << e.mem_obj->memCache.index;
    }
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_DISPATCHED)) os << 'D';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) os << 'I';
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
    }

    // collapsed-forwarding marker
    if (e.mem_obj && e.mem_obj->smpCollapsed)
        os << 'O';

    return os << '/' << &e << '*' << e.locks();
}
2173
/* NullStoreEntry */

// the single process-wide NullStoreEntry object
NullStoreEntry NullStoreEntry::_instance;

/// returns the shared NullStoreEntry singleton
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2183
/// a null entry has no cache key; return a placeholder string
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2189
/// the singleton must never be deleted; deleting it is a fatal bug
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2195
/// a null entry has no swap metadata to serialise
char const *
NullStoreEntry::getSerialisedMetaData()
{
    return NULL;
}
2201