/*
 * DEBUG: section 20 Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const char *MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
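// Free-slot IDs use 1-based numbers: a slot with PageId::number N refers to
// slice N-1 (see reserveSapForWriting() and noteFreeMapSlice()), so the
// default-constructed "unset" PageId (number 0) stays distinguishable.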


MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

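    // attach to the shared segments created at startup by MemStoreRr::create()
    // below, which uses shm_new()/MemStoreMap::Init() with the same labels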
    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(limit)) {
                const int usedSlots = limit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / limit));
            }

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
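/// A writer may still be appending while we copy, so each slice's size and
/// next-slice ID are snapshotted first, and a slice is re-read if it grew
/// (or gained a successor) after the snapshot.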
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMap::Extras &extras = map->extras(sid);
            char *page = static_cast<char*>(PagePointer(extras.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extras.page << " +" << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
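/// Rejects entries that are already mem-cached or being written, entries that
/// are not memoryCachable(), have an unknown or too-large expected size, are
/// ENTRY_SPECIAL, or arrive when no shared map is available.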
bool
MemStore::shouldCache(const StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, HERE << "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    return true;
}

/// copies all local data to shared memory
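/// Copying is incremental: mem_obj->memCache.offset tracks how much has been
/// copied already, and the call is postponed while response headers are still
/// awaited (ENTRY_FWD_HDR_WAIT) or no new local data has accumulated.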
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        map->extras(anchor.start).page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            map->extras(lastWritingSlice).page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = map->extras(lastWritingSlice).page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
}

/// finds a slot and a free page to fill or throws
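/// Tries the free-slot stack and the free-page pool first; if either is empty,
/// asks the map to purge an existing entry, expecting purgeOne() to deliver
/// the freed slot and page via noteFreeMapSlice().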
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

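/// handles a freed map slice: recycles its page and 1-based slot ID, either
/// giving them to a reserveSapForWriting() caller waiting inside purgeOne()
/// or returning them to the free pools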
void
MemStore::noteFreeMapSlice(const sfileno sliceId)
{
    Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

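/// copies new local content to the shared memory cache, entering, continuing,
/// or abandoning mem-caching of the entry according to its
/// MemObject::ioUndecided/ioWriting/ioDone state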
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    }
    catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

void
MemStore::unlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

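/// stops any ongoing mem-cache I/O for the entry: aborts an unfinished shared
/// write (abandoning the transients entry) or releases the read lock, and
/// detaches the entry from the shared map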
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
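    // For example, assuming the common 32 KB shared memory page size, a
    // 256 MB cache_mem yields 256 MB / 32 KB = 8192 entry/slice slots.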
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}
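
// Illustrative squid.conf fragment that exercises the shared-cache path
// checked above (hypothetical sizes):
//
//   workers 4
//   cache_mem 256 MB
//   memory_cache_shared on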

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit,
                                              sizeof(Ipc::Mem::PageId));
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete mapOwner;
    delete spaceOwner;
}