/*
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const char *MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

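/// warns if the mem-cache and disk-cache object size limits conflict and
/// attaches to the shared memory segments created by MemStoreRr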
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

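/// reports shared memory page capacity, current use, and cached entry count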
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

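/// adds shared memory cache details to a cache manager report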
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(limit)) {
                const int usedSlots = limit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / limit));
            }

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

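/// nothing to do here: the shared cache is purged on demand, when
/// reserveSapForWriting() runs out of free slots and pages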
void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

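/// loads a cached entry from shared memory into a new local StoreEntry;
/// returns NULL on a miss or when the cached copy cannot be loaded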
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

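/// tries to attach a collapsed entry to its shared memory cache counterpart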
bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
                reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

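/// refreshes a previously anchored collapsed entry with newly cached content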
bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMap::Extras &extras = map->extras(sid);
            char *page = static_cast<char*>(PagePointer(extras.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extras.page << " +" << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(const StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        map->extras(anchor.start).page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            map->extras(lastWritingSlice).page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = map->extras(lastWritingSlice).page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

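/// recycles the page and slot of a freed map slice, giving them to the
/// reserveSapForWriting() caller waiting for them (if any) or back to the
/// free pools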
void
MemStore::noteFreeMapSlice(const sfileno sliceId)
{
    Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

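/// handles new local content: decides whether the entry should be cached
/// and, if so, copies fresh bytes to shared memory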
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

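/// finishes writing the shared entry and announces its availability to
/// other workers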
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

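/// marks the entry's shared map slot for removal, without altering the
/// local entry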
void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

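/// removes the entry from the shared memory cache and destroys its local
/// MemObject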
void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

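/// stops writing or reading the shared memory copy of the entry and
/// forgets its map index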
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit,
                                              sizeof(Ipc::Mem::PageId));
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete mapOwner;
    delete spaceOwner;
}