/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
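
// For orientation: init() only attaches to shared memory segments that
// already exist (shm_old); the matching shm_new(Ipc::Mem::PageStack),
// MemStoreMap::Init(), and shm_new(MemStoreMapExtras) calls that create
// "cache_mem_space", "cache_mem_map", and "cache_mem_ex" are in
// MemStoreRr::create() near the end of this file.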

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}
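
// Illustrative arithmetic, assuming the default 32 KiB shared page size:
// with 2048 cache pages currently in use, currentSize() reports
// 2048 * 32768 = 64 MiB. PageLevel() counts used pages; PageLimit(), used
// by getStats() above, counts the configured maximum.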

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
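
// Worked example, assuming the usual squid.conf mapping of
// maximum_object_size_in_memory to Config.Store.maxInMemObjSize and
// cache_mem to Config.memMaxSize: with "maximum_object_size_in_memory 512 KB"
// and "cache_mem 256 MB", maxObjectSize() is 512 KB; a larger per-object
// limit would still be capped at the total cache_mem size.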

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
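
// Reading aid for the loop above: wasEof and wasSize are snapshots taken
// because a concurrent writer may still be appending to the slice. If the
// slice grew after the snapshot (wasSize < slice.size), the loop neither
// advances sliceOffset nor follows slice.next, so the same slice is revisited
// and only its not-yet-copied suffix (beyond prefixSize) is copied then.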

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
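
// Illustrative numbers for the size check above: a response with
// Content-Length: 300000 gives expectedSize = 300000 even if only
// loadedSize = 120000 bytes have arrived so far, so ramSize = 300000;
// with maxObjectSize() at 256 KB (262144 bytes) the entry is rejected
// although the loaded portion alone would still fit.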

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        extras->items[anchor.start].page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            extras->items[lastWritingSlice].page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
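
// Illustrative slice layout, assuming the default 32 KiB shared page size:
// a 70 KiB response ends up in three slices (32 KiB + 32 KiB + 6 KiB).
// Since memCache.offset counts bytes already copied to shared memory,
// "offset % PageSize()" in copyToShmSlice() below is the write position
// inside the current slice's page, e.g. offset 66536 maps to byte 1000 of
// the third slice.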

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = extras->items[lastWritingSlice].page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
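
// Note on the "slot.number - 1" above: free-slot PageId numbers are 1-based
// (a zero number reads as an unset PageId, hence the positivity test noted
// near SpacePoolId), while map slice IDs are 0-based sfileno values.
// noteFreeMapSlice() below applies the inverse "sliceId + 1" conversion when
// recycling a freed slice into the free-slot stack.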

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
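
// memCache.io transitions driven by this file, for reference:
//   ioUndecided -> ioWriting  via startCaching() when shouldCache() allows it
//   ioUndecided -> ioDone     via write() above, with memOutDecision(false)
//   ioWriting   -> ioDone     via completeWriting() or disconnect()
//   ioReading is set only by anchorEntry() when serving a hit from the map
//   and also ends in ioDone via disconnect().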

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
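
// Worked example, assuming the usual directive mapping (cache_mem sets
// Config.memMaxSize, memory_cache_shared sets Config.memShared) and the
// default 32 KiB shared page size: "cache_mem 256 MB" with the shared cache
// enabled yields 268435456 / 32768 = 8192 entries, which is also the number
// of cache pages claimed in MemStoreRr::claimMemoryNeeds() below.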

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}