/*
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
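// For example, free slice 0 circulates through the free-slot PageStack as a
// Page with pool SpacePoolId and number 1: noteFreeMapSlice() pushes
// sliceId + 1 and reserveSapForWriting() converts back via slot.number - 1.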

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

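/// prepares locally needed structures and attaches to the shared memory
/// segments created earlier by MemStoreRr (no-op without a memory cache)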
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

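/// Store API: fills shared memory cache capacity, usage, and entry count stats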
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

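/// Store API: appends a human-readable shared memory cache report
/// (sizes, entries, slots, and lock stats) to the given cache manager entry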
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

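/// Store API: constructs a local StoreEntry from a shared cache hit,
/// copying the cached response into local memory; returns NULL on a miss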
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

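/// Store API: attaches a collapsed entry to its shared cache slot (if any)
/// and tries to sync it with the shared data; returns false on a cache miss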
bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

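/// Store API: copies any newly arrived shared data into an already anchored
/// collapsed entry; returns false if the entry could not be brought in sync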
bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        extras->items[anchor.start].page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            extras->items[lastWritingSlice].page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = extras->items[lastWritingSlice].page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
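    // e.g., assuming the usual 32 KB Ipc::Mem::PageSize(), a memCache.offset
    // of 40 KB starts 8 KB into the current (second) slice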
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

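/// StoreMapCleaner API: returns the freed slice's slot ID and page to the
/// free pools, or hands them directly to a reserveSapForWriting() caller
/// waiting for them during a purge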
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

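/// Store API: incrementally copies newly arrived entry data into the shared
/// memory cache, deciding whether to start (or stop) caching on the way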
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

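/// Store API: stops writing to the shared cache entry, making it complete
/// and readable by other workers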
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

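/// Store API: marks the corresponding shared cache entry for eventual removal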
void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

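/// Store API: removes the entry from the shared cache (by index or by key)
/// and destroys its local MemObject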
void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

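/// Store API: severs the local entry from its shared memory counterpart,
/// aborting an unfinished write or ending a read lock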
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
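    // e.g., assuming the usual 32 KB Ipc::Mem::PageSize(), a 256 MB cache_mem
    // yields an entryLimit of 8192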
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}