/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    virtual void append(const char *aBuf, int aSize) override;
    virtual void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};

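// ShmWriter is used by MemStore::updateHeadersOrThrow() below: the fresh
// response headers are packed into shared memory via HttpReply::packHeadersInto().
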
/* ShmWriter */

ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}

void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(buf);
        bufWritten = 0;
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}

void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}

/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}

/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
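    // a slice is backed by a single shared memory page, so the write position
    // inside the current page can be derived from the cumulative totalWritten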
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}

/* MemStore */

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().find() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->createMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied)
        return e;

    debugs(20, 3, "failed for " << *e);
    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return NULL;
}

void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}

void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const HttpReply *rawReply = update.entry->getReply();
    Must(rawReply);
    const HttpReply &reply = *rawReply;
    const uint64_t staleHdrSz = reply.hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    reply.packHeadersInto(&writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

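    // swap_file_sz covers headers plus body; the fresh anchor inherited the
    // stale total (checked by the Must() above), so swapping the stale header
    // size for the fresh one yields the updated total (the body is unchanged)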
    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}

bool
MemStore::anchorToCache(StoreEntry &entry, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        return false;

    anchorEntry(entry, index, *slot);
    inSync = updateAnchoredWith(entry, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}

/// updates a mem-cached entry after its shared-memory anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < Http::Message::psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == Http::Message::psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
            e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
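    // memCache.offset is the number of bytes already copied to shared memory;
    // since each slice holds one page worth of data, offset modulo the page
    // size is the next free position within the current page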
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

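    // each allocated slice owns exactly one shared memory page; the page ID is
    // remembered in extras->items[] so that pageForSlice() can find slice data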
    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}

/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
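            // free-slot IDs are stored as 1-based page numbers; convert back
            // to a 0-based slice ID (the reverse of noteFreeMapSlice())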
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
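    // represent the freed slice as a free-space slot: 0-based slice IDs map to
    // 1-based Page numbers, matching the slot.number - 1 conversion in
    // reserveSapForWriting()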
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

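    // mem-caching state machine: ioUndecided entries may start caching here,
    // ioWriting entries keep copying, and ioDone/ioReading entries are ignored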
    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}

void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().stopSharing(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

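    // worst case: every cached object occupies a single (minimum-size) page.
    // For example, assuming the default 32 KB shared page size, a 256 MB
    // cache_mem yields an 8192-entry (and 8192-slice) limit.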
    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
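    // these three segments (free-slot stack, entry map, and per-slice page
    // IDs) are the ones later attached by MemStore::init() via shm_old()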
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}
