1 /*
2 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Memory Cache */
10
11 #include "squid.h"
12 #include "base/RunnersRegistry.h"
13 #include "CollapsedForwarding.h"
14 #include "HttpReply.h"
15 #include "ipc/mem/Page.h"
16 #include "ipc/mem/Pages.h"
17 #include "MemObject.h"
18 #include "MemStore.h"
19 #include "mime_header.h"
20 #include "SquidConfig.h"
21 #include "SquidMath.h"
22 #include "StoreStats.h"
23 #include "tools.h"
24
25 /// shared memory segment path to use for MemStore maps
26 static const SBuf MapLabel("cache_mem_map");
27 /// shared memory segment path to use for the free slices index
28 static const char *SpaceLabel = "cache_mem_space";
29 /// shared memory segment path to use for IDs of shared pages with slice data
30 static const char *ExtrasLabel = "cache_mem_ex";
31 // TODO: sync with Rock::SwapDir::*Path()
32
33 /// Packs to shared memory, allocating new slots/pages as needed.
34 /// Requires an Ipc::StoreMapAnchor locked for writing.
35 class ShmWriter: public Packable
36 {
37 public:
38 ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);
39
40 /* Packable API */
41 virtual void append(const char *aBuf, int aSize) override;
42 virtual void vappendf(const char *fmt, va_list ap) override;
43
44 public:
45 StoreEntry *entry; ///< the entry being updated
46
47 /// the slot keeping the first byte of the appended content (at least)
48 /// either set via constructor parameter or allocated by the first append
49 Ipc::StoreMapSliceId firstSlice;
50
51 /// the slot keeping the last byte of the appended content (at least)
52 Ipc::StoreMapSliceId lastSlice;
53
54 uint64_t totalWritten; ///< cumulative number of bytes appended so far
55
56 protected:
57 void copyToShm();
58 void copyToShmSlice(Ipc::StoreMap::Slice &slice);
59
60 private:
61 MemStore &store;
62 const sfileno fileNo;
63
64 /* set by (and only valid during) append calls */
65 const char *buf; ///< content being appended now
66 int bufSize; ///< buf size
67 int bufWritten; ///< buf bytes appended so far
68 };
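// Within this file, ShmWriter is used by MemStore::updateHeadersOrThrow() to
// pack a fresh reply straight into shared memory, roughly:
//
//   ShmWriter writer(*this, update.entry, update.fresh.fileNo);
//   update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
//   // firstSlice/lastSlice now delimit the freshly written slice chain
//
// Each append() fills the slice that lastSlice points to and spills into
// newly reserved slices (and pages) via MemStore::nextAppendableSlice().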
69
70 /* ShmWriter */
71
72 ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
73 entry(anEntry),
74 firstSlice(aFirstSlice),
75 lastSlice(firstSlice),
76 totalWritten(0),
77 store(aStore),
78 fileNo(aFileNo),
79 buf(nullptr),
80 bufSize(0),
81 bufWritten(0)
82 {
83 Must(entry);
84 }
85
86 void
87 ShmWriter::append(const char *aBuf, int aBufSize)
88 {
89 Must(!buf);
90 buf = aBuf;
91 bufSize = aBufSize;
92 if (bufSize) {
93 Must(buf);
94 bufWritten = 0;
95 copyToShm();
96 }
97 buf = nullptr;
98 bufSize = 0;
99 bufWritten = 0;
100 }
101
102 void
103 ShmWriter::vappendf(const char *fmt, va_list ap)
104 {
105 SBuf vaBuf;
106 va_list apCopy;
107 va_copy(apCopy, ap);
108 vaBuf.vappendf(fmt, apCopy);
109 va_end(apCopy);
110 append(vaBuf.rawContent(), vaBuf.length());
111 }
112
113 /// copies the entire buffer to shared memory
114 void
115 ShmWriter::copyToShm()
116 {
117 Must(bufSize > 0); // do not use up shared memory pages for nothing
118 Must(firstSlice < 0 || lastSlice >= 0);
119
120 // fill, skip slices that are already full
121 while (bufWritten < bufSize) {
122 Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
123 if (firstSlice < 0)
124 firstSlice = lastSlice;
125 copyToShmSlice(slice);
126 }
127
128 debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
129 }
130
131 /// copies at most one slice worth of buffer to shared memory
132 void
133 ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
134 {
135 Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
136 debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
137 page);
138
139 Must(bufWritten <= bufSize);
140 const int64_t writingDebt = bufSize - bufWritten;
141 const int64_t pageSize = Ipc::Mem::PageSize();
142 const int64_t sliceOffset = totalWritten % pageSize;
143 const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
144 memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
145 copySize);
146
147 debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
148 entry << " from " << sliceOffset << " in " << page);
149
150 slice.size += copySize;
151 bufWritten += copySize;
152 totalWritten += copySize;
153 // fresh anchor.basics.swap_file_sz is already set [to the stale value]
154
155 // either we wrote everything or we filled the entire slice
156 Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
157 }
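// Illustration of the slice math above (assuming, for the example only, a
// 32 KB shared page): after 40960 header bytes have been appended,
// totalWritten is 40960, sliceOffset is 40960 % 32768 = 8192, and at most
// 32768 - 8192 = 24576 more bytes fit into the current slice; any remainder
// makes copyToShm() reserve another slice/page pair on the next iteration.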
158
159 /* MemStore */
160
161 MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
162 {
163 }
164
165 MemStore::~MemStore()
166 {
167 delete map;
168 }
169
170 void
171 MemStore::init()
172 {
173 const int64_t entryLimit = EntryLimit();
174 if (entryLimit <= 0)
175 return; // no shared memory cache configured or a misconfiguration
176
177 // check compatibility with the disk cache, if any
178 if (Config.cacheSwap.n_configured > 0) {
179 const int64_t diskMaxSize = Store::Root().maxObjectSize();
180 const int64_t memMaxSize = maxObjectSize();
181 if (diskMaxSize == -1) {
182 debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
183 "is unlimited but mem-cache maximum object size is " <<
184 memMaxSize / 1024.0 << " KB");
185 } else if (diskMaxSize > memMaxSize) {
186 debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
187 "is too large for mem-cache: " <<
188 diskMaxSize / 1024.0 << " KB > " <<
189 memMaxSize / 1024.0 << " KB");
190 }
191 }
192
193 freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
194 extras = shm_old(Extras)(ExtrasLabel);
195
196 Must(!map);
197 map = new MemStoreMap(MapLabel);
198 map->cleaner = this;
199 }
200
201 void
202 MemStore::getStats(StoreInfoStats &stats) const
203 {
204 const size_t pageSize = Ipc::Mem::PageSize();
205
206 stats.mem.shared = true;
207 stats.mem.capacity =
208 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
209 stats.mem.size =
210 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
211 stats.mem.count = currentCount();
212 }
213
214 void
215 MemStore::stat(StoreEntry &e) const
216 {
217 storeAppendPrintf(&e, "\n\nShared Memory Cache\n");
218
219 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
220 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
221 currentSize() / 1024.0,
222 Math::doublePercent(currentSize(), maxSize()));
223
224 if (map) {
225 const int entryLimit = map->entryLimit();
226 const int slotLimit = map->sliceLimit();
227 storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
228 if (entryLimit > 0) {
229 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
230 currentCount(), (100.0 * currentCount() / entryLimit));
231 }
232
233 storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
234 if (slotLimit > 0) {
235 const unsigned int slotsFree =
236 Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
237 if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
238 const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
239 storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
240 usedSlots, (100.0 * usedSlots / slotLimit));
241 }
242
243 if (slotLimit < 100) { // XXX: otherwise too expensive to count
244 Ipc::ReadWriteLockStats stats;
245 map->updateStats(stats);
246 stats.dump(e);
247 }
248 }
249 }
250 }
251
252 void
253 MemStore::maintain()
254 {
255 }
256
257 uint64_t
258 MemStore::minSize() const
259 {
260 return 0; // XXX: irrelevant, but Store parent forces us to implement this
261 }
262
263 uint64_t
264 MemStore::maxSize() const
265 {
266 return Config.memMaxSize;
267 }
268
269 uint64_t
270 MemStore::currentSize() const
271 {
272 return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
273 Ipc::Mem::PageSize();
274 }
275
276 uint64_t
277 MemStore::currentCount() const
278 {
279 return map ? map->entryCount() : 0;
280 }
281
282 int64_t
283 MemStore::maxObjectSize() const
284 {
285 return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
286 }
287
288 void
289 MemStore::reference(StoreEntry &)
290 {
291 }
292
293 bool
294 MemStore::dereference(StoreEntry &)
295 {
296 // no need to keep e in the global store_table for us; we have our own map
297 return false;
298 }
299
300 StoreEntry *
301 MemStore::get(const cache_key *key)
302 {
303 if (!map)
304 return NULL;
305
306 sfileno index;
307 const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
308 if (!slot)
309 return NULL;
310
311 // create a brand new store entry and initialize it with stored info
312 StoreEntry *e = new StoreEntry();
313
314 // XXX: We do not know the URLs yet, only the key, but we need to parse and
315 // store the response for the Root().find() callers to be happy because they
316 // expect IN_MEMORY entries to already have the response headers and body.
317 e->createMemObject();
318
319 anchorEntry(*e, index, *slot);
320
321 const bool copied = copyFromShm(*e, index, *slot);
322
323 if (copied)
324 return e;
325
326 debugs(20, 3, "failed for " << *e);
327 map->freeEntry(index); // do not let others into the same trap
328 destroyStoreEntry(static_cast<hash_link *>(e));
329 return NULL;
330 }
331
332 void
333 MemStore::updateHeaders(StoreEntry *updatedE)
334 {
335 if (!map)
336 return;
337
338 Ipc::StoreMapUpdate update(updatedE);
339 assert(updatedE);
340 assert(updatedE->mem_obj);
341 if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
342 return;
343
344 try {
345 updateHeadersOrThrow(update);
346 } catch (const std::exception &ex) {
347 debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
348 map->abortUpdating(update);
349 }
350 }
351
352 void
353 MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
354 {
355 // our +/- hdr_sz math below does not work if the chains differ [in size]
356 Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);
357
358 const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
359 debugs(20, 7, "stale hdr_sz: " << staleHdrSz);
360
361 /* we will need to copy same-slice payload after the stored headers later */
362 Must(staleHdrSz > 0);
363 update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
364 Must(update.stale.splicingPoint >= 0);
365 Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);
366
367 Must(update.stale.anchor);
368 ShmWriter writer(*this, update.entry, update.fresh.fileNo);
369 update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
370 const uint64_t freshHdrSz = writer.totalWritten;
371 debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));
372
373 /* copy same-slice payload remaining after the stored headers */
374 const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
375 const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
376 const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
377 Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
378 Must(slice.size >= headersInLastSlice);
379 const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
380 const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
381 char *page = static_cast<char*>(PagePointer(extra.page));
382 debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
383 writer.append(page + headersInLastSlice, payloadInLastSlice);
384 update.fresh.splicingPoint = writer.lastSlice;
385
386 update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
387 update.fresh.anchor->basics.swap_file_sz += freshHdrSz;
388
389 map->closeForUpdating(update);
390 }
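// A numeric sketch of the update above (sizes are illustrative only): with
// 5000 bytes of stale headers and 32 KB slices, splicingPoint is the first
// stale slice, headersInLastSlice is 5000, and payloadInLastSlice is
// whatever body bytes already share that slice. The fresh headers go into a
// brand-new slice chain, the shared payload bytes are re-appended after
// them, and swap_file_sz is adjusted by the header size difference before
// map->closeForUpdating() publishes the combined chain.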
391
392 bool
393 MemStore::anchorToCache(StoreEntry &entry, bool &inSync)
394 {
395 if (!map)
396 return false;
397
398 sfileno index;
399 const Ipc::StoreMapAnchor *const slot = map->openForReading(
400 reinterpret_cast<cache_key*>(entry.key), index);
401 if (!slot)
402 return false;
403
404 anchorEntry(entry, index, *slot);
405 inSync = updateAnchoredWith(entry, index, *slot);
406 return true; // even if inSync is false
407 }
408
409 bool
410 MemStore::updateAnchored(StoreEntry &entry)
411 {
412 if (!map)
413 return false;
414
415 assert(entry.mem_obj);
416 assert(entry.hasMemStore());
417 const sfileno index = entry.mem_obj->memCache.index;
418 const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
419 return updateAnchoredWith(entry, index, anchor);
420 }
421
422 /// updates a mem-cached StoreEntry after its shared memory anchor has been located
423 bool
424 MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
425 {
426 entry.swap_file_sz = anchor.basics.swap_file_sz;
427 const bool copied = copyFromShm(entry, index, anchor);
428 return copied;
429 }
430
431 /// anchors StoreEntry to an already locked map entry
432 void
433 MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
434 {
435 assert(!e.hasDisk()); // no conflict with disk entry basics
436 anchor.exportInto(e);
437
438 assert(e.mem_obj);
439 if (anchor.complete()) {
440 e.store_status = STORE_OK;
441 e.mem_obj->object_sz = e.swap_file_sz;
442 e.setMemStatus(IN_MEMORY);
443 } else {
444 e.store_status = STORE_PENDING;
445 assert(e.mem_obj->object_sz < 0);
446 e.setMemStatus(NOT_IN_MEMORY);
447 }
448
449 EBIT_SET(e.flags, ENTRY_VALIDATED);
450
451 MemObject::MemCache &mc = e.mem_obj->memCache;
452 mc.index = index;
453 mc.io = MemObject::ioReading;
454 }
455
456 /// copies the entire entry from shared to local memory
457 bool
458 MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
459 {
460 debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
461 assert(e.mem_obj);
462
463 // emulate the usual Store code but w/o inapplicable checks and callbacks:
464
465 Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
466 bool wasEof = anchor.complete() && sid < 0;
467 int64_t sliceOffset = 0;
468 while (sid >= 0) {
469 const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
470 // slice state may change during copying; take snapshots now
471 wasEof = anchor.complete() && slice.next < 0;
472 const Ipc::StoreMapSlice::Size wasSize = slice.size;
473
474 debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
475 wasEof << " wasSize " << wasSize << " <= " <<
476 anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
477 " mem.endOffset " << e.mem_obj->endOffset());
478
479 if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
480 // size of the slice data that we already copied
481 const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
482 assert(prefixSize <= wasSize);
483
484 const MemStoreMapExtras::Item &extra = extras->items[sid];
485
486 char *page = static_cast<char*>(PagePointer(extra.page));
487 const StoreIOBuffer sliceBuf(wasSize - prefixSize,
488 e.mem_obj->endOffset(),
489 page + prefixSize);
490 if (!copyFromShmSlice(e, sliceBuf, wasEof))
491 return false;
492 debugs(20, 8, "entry " << index << " copied slice " << sid <<
493 " from " << extra.page << '+' << prefixSize);
494 }
495 // else skip a [possibly incomplete] slice that we copied earlier
496
497 // careful: the slice may have grown _and_ gotten the next slice ID!
498 if (slice.next >= 0) {
499 assert(!wasEof);
500 // here we know that slice.size may not change any more
501 if (wasSize >= slice.size) { // did not grow since we started copying
502 sliceOffset += wasSize;
503 sid = slice.next;
504 }
505 } else if (wasSize >= slice.size) { // did not grow
506 break;
507 }
508 }
509
510 if (!wasEof) {
511 debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
512 anchor.basics.swap_file_sz << " bytes of " << e);
513 return true;
514 }
515
516 debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
517 anchor.basics.swap_file_sz << " bytes of " << e);
518
519 // from StoreEntry::complete()
520 e.mem_obj->object_sz = e.mem_obj->endOffset();
521 e.store_status = STORE_OK;
522 e.setMemStatus(IN_MEMORY);
523
524 assert(e.mem_obj->object_sz >= 0);
525 assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
526 // would be nice to call validLength() here, but it needs e.key
527
528     // we read the entire response into local memory; no more need to lock
529 disconnect(e);
530 return true;
531 }
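// Note on the loop above: a writer may still be appending while we copy, so
// each iteration snapshots slice.size and the EOF condition first and only
// advances to slice.next (or stops) once the snapshot is known not to lag
// behind the live slice; if the slice grew since the snapshot, the same
// slice is revisited on the next pass to pick up the extra bytes.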
532
533 /// imports one shared memory slice into local memory
534 bool
535 MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
536 {
537 debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);
538
539 // from store_client::readBody()
540 // parse headers if needed; they might span multiple slices!
541 const auto rep = &e.mem().adjustableBaseReply();
542 if (rep->pstate < Http::Message::psParsed) {
543 // XXX: have to copy because httpMsgParseStep() requires 0-termination
544 MemBuf mb;
545 mb.init(buf.length+1, buf.length+1);
546 mb.append(buf.data, buf.length);
547 mb.terminate();
548 const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
549 if (result > 0) {
550 assert(rep->pstate == Http::Message::psParsed);
551 } else if (result < 0) {
552 debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
553 return false;
554 } else { // more slices are needed
555 assert(!eof);
556 }
557 }
558 debugs(20, 7, "rep pstate: " << rep->pstate);
559
560 // local memory stores both headers and body so copy regardless of pstate
561 const int64_t offBefore = e.mem_obj->endOffset();
562 assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
563 const int64_t offAfter = e.mem_obj->endOffset();
564 // expect to write the entire buf because StoreEntry::write() never fails
565 assert(offAfter >= 0 && offBefore <= offAfter &&
566 static_cast<size_t>(offAfter - offBefore) == buf.length);
567 return true;
568 }
569
570 /// whether we should cache the entry
571 bool
572 MemStore::shouldCache(StoreEntry &e) const
573 {
574 if (e.mem_status == IN_MEMORY) {
575 debugs(20, 5, "already loaded from mem-cache: " << e);
576 return false;
577 }
578
579 if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
580 debugs(20, 5, "already written to mem-cache: " << e);
581 return false;
582 }
583
584 if (!e.memoryCachable()) {
585 debugs(20, 7, HERE << "Not memory cachable: " << e);
586 return false; // will not cache due to entry state or properties
587 }
588
589 assert(e.mem_obj);
590
591 if (!e.mem_obj->vary_headers.isEmpty()) {
592 // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
593 debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
594 return false;
595 }
596
597 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
598 const int64_t loadedSize = e.mem_obj->endOffset();
599 const int64_t ramSize = max(loadedSize, expectedSize);
600 if (ramSize > maxObjectSize()) {
601 debugs(20, 5, HERE << "Too big max(" <<
602 loadedSize << ", " << expectedSize << "): " << e);
603 return false; // will not cache due to cachable entry size limits
604 }
605
606 if (!e.mem_obj->isContiguous()) {
607 debugs(20, 5, "not contiguous");
608 return false;
609 }
610
611 if (!map) {
612 debugs(20, 5, HERE << "No map to mem-cache " << e);
613 return false;
614 }
615
616 if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
617 debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
618 return false;
619 }
620
621 return true;
622 }
623
624 /// locks map anchor and preps to store the entry in shared memory
625 bool
626 MemStore::startCaching(StoreEntry &e)
627 {
628 sfileno index = 0;
629 Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
630 if (!slot) {
631 debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
632 return false;
633 }
634
635 assert(e.mem_obj);
636 e.mem_obj->memCache.index = index;
637 e.mem_obj->memCache.io = MemObject::ioWriting;
638 slot->set(e);
639 // Do not allow others to feed off an unknown-size entry because we will
640 // stop swapping it out if it grows too large.
641 if (e.mem_obj->expectedReplySize() >= 0)
642 map->startAppending(index);
643 e.memOutDecision(true);
644 return true;
645 }
646
647 /// copies all local data to shared memory
648 void
649 MemStore::copyToShm(StoreEntry &e)
650 {
651 assert(map);
652 assert(e.mem_obj);
653 Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));
654
655 const int64_t eSize = e.mem_obj->endOffset();
656 if (e.mem_obj->memCache.offset >= eSize) {
657 debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
658 e.mem_obj->memCache.offset << " >= " << eSize);
659 return; // nothing to do (yet)
660 }
661
662 // throw if an accepted unknown-size entry grew too big or max-size changed
663 Must(eSize <= maxObjectSize());
664
665 const int32_t index = e.mem_obj->memCache.index;
666 assert(index >= 0);
667 Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
668 lastWritingSlice = anchor.start;
669
670 // fill, skip slices that are already full
671 // Optimize: remember lastWritingSlice in e.mem_obj
672 while (e.mem_obj->memCache.offset < eSize) {
673 Ipc::StoreMap::Slice &slice = nextAppendableSlice(
674 e.mem_obj->memCache.index, lastWritingSlice);
675 if (anchor.start < 0)
676 anchor.start = lastWritingSlice;
677 copyToShmSlice(e, anchor, slice);
678 }
679
680 debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
681 }
682
683 /// copies at most one slice worth of local memory to shared memory
684 void
685 MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
686 {
687 Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
688 debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
689 page);
690
691 const int64_t bufSize = Ipc::Mem::PageSize();
692 const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
693 StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
694 static_cast<char*>(PagePointer(page)) + sliceOffset);
695
696 // check that we kept everything or purge incomplete/sparse cached entry
697 const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
698 if (copied <= 0) {
699 debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
700 " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
701 " in " << page);
702 throw TexcHere("data_hdr.copy failure");
703 }
704
705 debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
706 " from " << e.mem_obj->memCache.offset << " in " << page);
707
708 slice.size += copied;
709 e.mem_obj->memCache.offset += copied;
710 anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
711 }
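// While the entry is still being filled, the assignment above keeps
// anchor.basics.swap_file_sz equal to the number of bytes mem-cached so far,
// so a reader anchoring to a still-growing entry (see updateAnchoredWith())
// copies an up-to-date size; completeWriting() later closes the map entry
// once all content has been cached.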
712
713 /// starts checking with the entry chain slice at a given offset and
714 /// returns a not-full (but not necessarily empty) slice, updating sliceOffset
715 Ipc::StoreMap::Slice &
716 MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
717 {
718 // allocate the very first slot for the entry if needed
719 if (sliceOffset < 0) {
720 Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
721 Must(anchor.start < 0);
722 Ipc::Mem::PageId page;
723 sliceOffset = reserveSapForWriting(page); // throws
724 extras->items[sliceOffset].page = page;
725 anchor.start = sliceOffset;
726 }
727
728 const size_t sliceCapacity = Ipc::Mem::PageSize();
729 do {
730 Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);
731
732 if (slice.size >= sliceCapacity) {
733 if (slice.next >= 0) {
734 sliceOffset = slice.next;
735 continue;
736 }
737
738 Ipc::Mem::PageId page;
739 slice.next = sliceOffset = reserveSapForWriting(page);
740 extras->items[sliceOffset].page = page;
741 debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
742 continue; // to get and return the slice at the new sliceOffset
743 }
744
745 return slice;
746 } while (true);
747 /* not reached */
748 }
749
750 /// safely returns a previously allocated memory page for the given entry slice
751 Ipc::Mem::PageId
752 MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
753 {
754 Must(extras);
755 Must(sliceId >= 0);
756 Ipc::Mem::PageId page = extras->items[sliceId].page;
757 Must(page);
758 return page;
759 }
760
761 /// finds a slot and a free page to fill or throws
762 sfileno
763 MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
764 {
765 Ipc::Mem::PageId slot;
766 if (freeSlots->pop(slot)) {
767 const auto slotId = slot.number - 1;
768 debugs(20, 5, "got a previously free slot: " << slotId);
769
770 if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
771 debugs(20, 5, "and got a previously free page: " << page);
772 map->prepFreeSlice(slotId);
773 return slotId;
774 } else {
775 debugs(20, 3, "but there is no free page, returning " << slotId);
776 freeSlots->push(slot);
777 }
778 }
779
780 // catch free slots delivered to noteFreeMapSlice()
781 assert(!waitingFor);
782 waitingFor.slot = &slot;
783 waitingFor.page = &page;
784 if (map->purgeOne()) {
785 assert(!waitingFor); // noteFreeMapSlice() should have cleared it
786 assert(slot.set());
787 assert(page.set());
788 const auto slotId = slot.number - 1;
789 map->prepFreeSlice(slotId);
790 debugs(20, 5, "got previously busy " << slotId << " and " << page);
791 return slotId;
792 }
793 assert(waitingFor.slot == &slot && waitingFor.page == &page);
794 waitingFor.slot = NULL;
795 waitingFor.page = NULL;
796
797 debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
798 throw TexcHere("ran out of mem-cache slots");
799 }
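// Slot/slice/page correspondence used above: free-slot PageIds popped from
// freeSlots use 1-based numbers, so the slice ID is slot.number - 1 (and
// noteFreeMapSlice() converts back with sliceId + 1). Each reserved slice
// gets an independently allocated cache page, and the pairing is recorded
// in extras->items[sliceId].page by the caller.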
800
801 void
802 MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
803 {
804 Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
805 debugs(20, 9, "slice " << sliceId << " freed " << pageId);
806 assert(pageId);
807 Ipc::Mem::PageId slotId;
808 slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
809 slotId.number = sliceId + 1;
810 if (!waitingFor) {
811 // must zero pageId before we give slice (and pageId extras!) to others
812 Ipc::Mem::PutPage(pageId);
813 freeSlots->push(slotId);
814 } else {
815 *waitingFor.slot = slotId;
816 *waitingFor.page = pageId;
817 waitingFor.slot = NULL;
818 waitingFor.page = NULL;
819 pageId = Ipc::Mem::PageId();
820 }
821 }
822
823 void
824 MemStore::write(StoreEntry &e)
825 {
826 assert(e.mem_obj);
827
828 debugs(20, 7, "entry " << e);
829
830 switch (e.mem_obj->memCache.io) {
831 case MemObject::ioUndecided:
832 if (!shouldCache(e) || !startCaching(e)) {
833 e.mem_obj->memCache.io = MemObject::ioDone;
834 e.memOutDecision(false);
835 return;
836 }
837 break;
838
839 case MemObject::ioDone:
840 case MemObject::ioReading:
841 return; // we should not write in all of the above cases
842
843 case MemObject::ioWriting:
844 break; // already decided to write and still writing
845 }
846
847 try {
848 copyToShm(e);
849 if (e.store_status == STORE_OK) // done receiving new content
850 completeWriting(e);
851 else
852 CollapsedForwarding::Broadcast(e);
853 return;
854 } catch (const std::exception &x) { // TODO: should we catch ... as well?
855 debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
856 // fall through to the error handling code
857 }
858
859 disconnect(e);
860 }
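// memCache.io acts as a small per-entry state machine: ioUndecided until the
// first write() call, then either ioWriting (shouldCache() and startCaching()
// both succeeded) or ioDone (caching was rejected). Entries anchored for
// reading use ioReading instead, and both paths end in ioDone via
// completeWriting() or disconnect().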
861
862 void
863 MemStore::completeWriting(StoreEntry &e)
864 {
865 assert(e.mem_obj);
866 const int32_t index = e.mem_obj->memCache.index;
867 assert(index >= 0);
868 assert(map);
869
870 debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);
871
872 e.mem_obj->memCache.index = -1;
873 e.mem_obj->memCache.io = MemObject::ioDone;
874 map->closeForWriting(index);
875
876 CollapsedForwarding::Broadcast(e); // before we close our transient entry!
877 Store::Root().transientsCompleteWriting(e);
878 }
879
880 void
881 MemStore::evictCached(StoreEntry &e)
882 {
883 debugs(47, 5, e);
884 if (e.hasMemStore()) {
885 if (map->freeEntry(e.mem_obj->memCache.index))
886 CollapsedForwarding::Broadcast(e);
887 if (!e.locked()) {
888 disconnect(e);
889 e.destroyMemObject();
890 }
891 } else if (const auto key = e.publicKey()) {
892 // the entry may have been loaded and then disconnected from the cache
893 evictIfFound(key);
894 if (!e.locked())
895 e.destroyMemObject();
896 }
897 }
898
899 void
900 MemStore::evictIfFound(const cache_key *key)
901 {
902 if (map)
903 map->freeEntryByKey(key);
904 }
905
906 void
907 MemStore::disconnect(StoreEntry &e)
908 {
909 assert(e.mem_obj);
910 MemObject &mem_obj = *e.mem_obj;
911 if (e.hasMemStore()) {
912 if (mem_obj.memCache.io == MemObject::ioWriting) {
913 map->abortWriting(mem_obj.memCache.index);
914 mem_obj.memCache.index = -1;
915 mem_obj.memCache.io = MemObject::ioDone;
916 Store::Root().stopSharing(e); // broadcasts after the change
917 } else {
918 assert(mem_obj.memCache.io == MemObject::ioReading);
919 map->closeForReading(mem_obj.memCache.index);
920 mem_obj.memCache.index = -1;
921 mem_obj.memCache.io = MemObject::ioDone;
922 }
923 }
924 }
925
926 bool
927 MemStore::Requested()
928 {
929 return Config.memShared && Config.memMaxSize > 0;
930 }
931
932 /// calculates maximum number of entries we need to store and map
933 int64_t
934 MemStore::EntryLimit()
935 {
936 if (!Requested())
937 return 0;
938
939 const int64_t minEntrySize = Ipc::Mem::PageSize();
940 const int64_t entryLimit = Config.memMaxSize / minEntrySize;
941 return entryLimit;
942 }
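// For example (assuming the usual 32 KB shared page size), a 256 MB
// cache_mem yields 268435456 / 32768 = 8192 entries, which is also the
// number of map anchors, extras items, and free-space stack slots that
// MemStoreRr::create() sets up below.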
943
944 /// reports our needs for shared memory pages to Ipc::Mem::Pages;
945 /// decides whether to use a shared memory cache or checks its configuration;
946 /// and initializes shared memory segments used by MemStore
947 class MemStoreRr: public Ipc::Mem::RegisteredRunner
948 {
949 public:
950 /* RegisteredRunner API */
951 MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
952 virtual void finalizeConfig();
953 virtual void claimMemoryNeeds();
954 virtual void useConfig();
955 virtual ~MemStoreRr();
956
957 protected:
958 /* Ipc::Mem::RegisteredRunner API */
959 virtual void create();
960
961 private:
962 Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
963 MemStoreMap::Owner *mapOwner; ///< primary map Owner
964 Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
965 };
966
967 RunnerRegistrationEntry(MemStoreRr);
968
969 void
970 MemStoreRr::claimMemoryNeeds()
971 {
972 Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
973 }
974
975 void
976 MemStoreRr::finalizeConfig()
977 {
978 // decide whether to use a shared memory cache if the user did not specify
979 if (!Config.memShared.configured()) {
980 Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
981 Config.memMaxSize > 0);
982 } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
983 fatal("memory_cache_shared is on, but no support for shared memory detected");
984 } else if (Config.memShared && !UsingSmp()) {
985 debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
986 " a single worker is running");
987 }
988
989 if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
990 debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
991 (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
992 (Ipc::Mem::PageSize() / 1024.0) << " KB");
993 }
994 }
995
996 void
997 MemStoreRr::useConfig()
998 {
999 assert(Config.memShared.configured());
1000 Ipc::Mem::RegisteredRunner::useConfig();
1001 }
1002
1003 void
1004 MemStoreRr::create()
1005 {
1006 if (!MemStore::Enabled())
1007 return;
1008
1009 const int64_t entryLimit = MemStore::EntryLimit();
1010 assert(entryLimit > 0);
1011
1012 Ipc::Mem::PageStack::Config spaceConfig;
1013 spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
1014 spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
1015 spaceConfig.capacity = entryLimit;
1016 spaceConfig.createFull = true; // all pages are initially available
1017 Must(!spaceOwner);
1018 spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
1019 Must(!mapOwner);
1020 mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
1021 Must(!extrasOwner);
1022 extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
1023 }
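// The three owners created here back the segment labels defined at the top
// of this file: SpaceLabel (the free slice/page stack), MapLabel (the
// anchor/slice map), and ExtrasLabel (per-slice PageIds). They are created
// once (normally by the master process, via the RegisteredRunner machinery)
// and later attached to by each worker in MemStore::init().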
1024
1025 MemStoreRr::~MemStoreRr()
1026 {
1027 delete extrasOwner;
1028 delete mapOwner;
1029 delete spaceOwner;
1030 }
1031