/*
 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
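
// Illustrative note (editorial, not from the original sources): free slice IDs
// travel through the PageStack as PageId::number values, presumably because a
// zero number would read as an unset PageId. That is why the code below shifts
// by one in both directions:
//
//   pushing a freed slice:  slotId.number = sliceId + 1;   (noteFreeMapSlice)
//   popping a free slot:    return slot.number - 1;        (reserveSapForWriting)
//
// so that slice ID 0 is representable as the non-zero number 1.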

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
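
// Illustrative note: init() only attaches (shm_old) to segments that are
// expected to have been created earlier via shm_new in MemStoreRr::create()
// (see the bottom of this file). The three segments are
//   MapLabel    - the anchor/slice map itself,
//   SpaceLabel  - a PageStack of free slice IDs,
//   ExtrasLabel - per-slice PageIds pointing at the actual data pages.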

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}
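
// Illustrative call sequence (a sketch, not code from this file): a worker
// that misses in its local store_table may end up probing the shared cache
// roughly like
//
//   if (StoreEntry *e = Store::Root().get(storeKeyPublicByRequest(request)))
//       ... // e is already IN_MEMORY thanks to copyFromShm() above
//
// which is why get() must finish parsing the headers and copying the body
// before handing the entry back.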

bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
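
// Worked example (illustrative, assuming 4 KB slices for round numbers): if a
// reader has already copied 6 KB (endOffset() == 6144) and is looking at the
// second slice (sliceOffset == 4096) whose snapshot size wasSize == 4096, then
//   prefixSize = 6144 - 4096 = 2048
// and the reader copies only the remaining 4096 - 2048 = 2048 bytes of that
// slice, starting at page + 2048. The growth re-check afterwards keeps the
// loop from advancing past a slice that a concurrent writer is still filling.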

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}
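
// Illustrative note: httpMsgParseStep() follows the usual tri-state parser
// convention relied on above -- a positive result means the headers are
// complete (pstate reaches psParsed), zero means "need more data from later
// slices", and a negative result means the stored headers are corrupt, which
// makes the caller free the whole map entry rather than serve a broken reply.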

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        extras->items[anchor.start].page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            extras->items[lastWritingSlice].page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
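
// Illustrative note: copyToShm() walks the slice chain from anchor.start,
// skipping slices that are already full, and appends new slices on demand
// until every local byte up to endOffset() has been exported. Because
// lastWritingSlice is a MemStore member rather than per-entry state, the walk
// restarts from anchor.start on every call; the "Optimize" comment above
// records the possible improvement of remembering it in e.mem_obj instead.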

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = extras->items[lastWritingSlice].page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
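
// Worked example (illustrative, assuming 32 KB pages): with memCache.offset at
// 40960 bytes, sliceOffset = 40960 % 32768 = 8192, so this call may copy up to
// 32768 - 8192 = 24576 bytes into the current page, starting at byte 8192.
// The modulo trick works because copyToShm() only advances to a new slice once
// the previous one has been filled to exactly one full page.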

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
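
// Illustrative note: when no free slot/page pair is available, the writer arms
// waitingFor with the addresses of its local slot and page variables and asks
// the map to evict one entry. map->purgeOne() frees the victim's slices, which
// triggers noteFreeMapSlice() below; instead of returning the freed slot and
// page to the shared pools, noteFreeMapSlice() hands them straight to the
// waiting writer through those pointers.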

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
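
// Illustrative summary of the memCache.io transitions driven by write() above:
//
//   ioUndecided --(shouldCache && startCaching)--> ioWriting
//   ioUndecided --(otherwise)--------------------> ioDone (never cached)
//   ioWriting   --(store_status == STORE_OK)-----> ioDone via completeWriting()
//   ioWriting   --(copy error)-------------------> ioDone via disconnect()
//
// ioReading is set only by anchorEntry() on the reading side and is never
// entered from here.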

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else if (map) {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
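
// Worked example (illustrative; the exact page size is platform and build
// dependent): with cache_mem set to 256 MB and a 32 KB shared page size,
//   entryLimit = 268435456 / 32768 = 8192
// map anchors, free-slot entries, and extras items would be provisioned.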

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}
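
// Illustrative note: create() shm_new()s the three segments that
// MemStore::init() later attaches to with shm_old() in each worker (segment
// creation happens once, in the master process in SMP setups). A minimal
// squid.conf sketch that would exercise this path (values are examples only):
//
//   workers 4
//   cache_mem 256 MB
//   memory_cache_shared on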

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}
