src/MemStore.cc
/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "sbuf/SBuf.h"
#include "sbuf/Stream.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    void append(const char *aBuf, int aSize) override;
    void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};

/* ShmWriter */

ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}

void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(buf);
        bufWritten = 0;
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}

void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}

/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}

/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}

/* MemStore */

MemStore::MemStore(): map(nullptr), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

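/// attaches to the shared memory segments created by MemStoreRr and builds the
/// mem-cache index (a no-op when no shared memory cache is configured)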
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

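/// builds a fresh local StoreEntry from a shared memory cache hit;
/// returns nil on a miss or when the cached entry cannot be loaded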
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return nullptr;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return nullptr;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    try {
        // XXX: We do not know the URLs yet, only the key, but we need to parse and
        // store the response for the Root().find() callers to be happy because they
        // expect IN_MEMORY entries to already have the response headers and body.
        e->createMemObject();

        anchorEntry(*e, index, *slot);

        // TODO: make copyFromShm() throw on all failures, simplifying this code
        if (copyFromShm(*e, index, *slot))
            return e;
        debugs(20, 3, "failed for " << *e);
    } catch (...) {
        // see store_client::parseHttpHeadersFromDisk() for problems this may log
        debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
               Debug::Extra << "exception: " << CurrentException <<
               Debug::Extra << "cache_mem entry: " << *e);
    }

    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return nullptr;
}

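/// replaces the mem-cached response headers of the given entry with their
/// freshest version (a no-op if the entry is not indexed in the mem-cache)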
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}

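/// updateHeaders() helper that completes the already started update or throws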
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}

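/// anchors a local StoreEntry to the corresponding shared memory cache entry
/// (if any) and imports the already cached content; returns false on a miss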
bool
MemStore::anchorToCache(StoreEntry &entry)
{
    Assure(!entry.hasMemStore());
    Assure(entry.mem().memCache.io != MemObject::ioDone);

    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        return false;

    anchorEntry(entry, index, *slot);
    if (!updateAnchoredWith(entry, index, *slot))
        throw TextException("updateAnchoredWith() failure", Here());
    return true;
}

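/// re-syncs a previously anchored local entry with its shared memory
/// counterpart, importing any newly cached content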
bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}

/// updates the local entry after its shared memory anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;

    SBuf httpHeaderParsingBuffer;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);

            copyFromShmSlice(e, sliceBuf);
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);

            // parse headers if needed; they might span multiple slices!
            if (!e.hasParsedReplyHeader()) {
                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
                auto &reply = e.mem().adjustableBaseReply();
                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
            }
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    if (anchor.writerHalted) {
        debugs(20, 5, "mem-loaded aborted " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return false;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    if (!e.hasParsedReplyHeader())
        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
void
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (shutting_down) {
        debugs(20, 5, "avoid heavy optional work during shutdown: " << e);
        return false;
    }

    // To avoid SMP workers releasing each other's caching attempts, restrict
    // mem-caching to the StoreEntry publisher. This check goes before
    // memoryCachable(), which may incorrectly release() the publisher's entry
    // via checkCachable().
    if (Store::Root().transientsReader(e)) {
        debugs(20, 5, "yield to entry publisher: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                                          e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}

/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        const auto slotId = slot.number - 1;
        debugs(20, 5, "got a previously free slot: " << slotId);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            map->prepFreeSlice(slotId);
            return slotId;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slotId);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        const auto slotId = slot.number - 1;
        map->prepFreeSlice(slotId);
        debugs(20, 5, "got previously busy " << slotId << " and " << page);
        return slotId;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = nullptr;
    waitingFor.page = nullptr;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = nullptr;
        waitingFor.page = nullptr;
        pageId = Ipc::Mem::PageId();
    }
}

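/// copies the freshly received portion of the entry into shared memory,
/// deciding whether to mem-cache the entry at all on the first call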
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

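/// finalizes the shared memory cache entry after all of its content has been stored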
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e);
    e.storeWriterDone();
}

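/// purges the entry from the shared memory cache index (if present) and drops
/// this worker's local association with it when the entry is no longer locked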
void
MemStore::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}

void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}

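/// stops writing or reading the shared memory entry and detaches the local
/// MemObject from the shared memory cache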
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            CollapsedForwarding::Broadcast(e);
            e.storeWriterDone();
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

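/// whether the configuration asks for a shared memory cache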
bool
MemStore::Requested()
{
    return Config.memShared && Config.memMaxSize > 0;
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(nullptr), mapOwner(nullptr), extrasOwner(nullptr) {}
    void finalizeConfig() override;
    void claimMemoryNeeds() override;
    void useConfig() override;
    ~MemStoreRr() override;

protected:
    /* Ipc::Mem::RegisteredRunner API */
    void create() override;

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

DefineRunnerRegistrator(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }

    if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
        debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
               (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
               (Ipc::Mem::PageSize() / 1024.0) << " KB");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}