/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

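/// attaches this worker to the shared memory segments created by MemStoreRr
/// and registers this MemStore as the map cleaner (noteFreeMapSlice() recipient)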
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

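/// loads a shared cache entry into a brand new local StoreEntry; on failure,
/// frees the damaged shared entry so that others do not hit the same problem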
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const, STOREGETCLIENT, void *)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

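/// anchors a collapsed-forwarding entry to its shared cache slot, if any, and
/// tries to sync the local entry with the already cached data (see inSync)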
bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
                reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
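    // A writer may still be appending to this entry while we copy it, so each
    // iteration below works from a snapshot of the slice state and advances to
    // the next slice only after the current slice has stopped growing.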
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        extras->items[anchor.start].page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            extras->items[lastWritingSlice].page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = extras->items[lastWritingSlice].page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
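    // earlier slices are always filled to a full page before a new slice is
    // appended (see copyToShm()), so the entry offset modulo the page size
    // gives the next write position within the current page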
4475555f
AR
578 const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
579 StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
580 static_cast<char*>(PagePointer(page)) + sliceOffset);
9199139f 581
9487bae9
AR
582 // check that we kept everything or purge incomplete/sparse cached entry
583 const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
06684a9b 584 if (copied <= 0) {
4475555f
AR
585 debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
586 " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
587 " in " << page);
588 throw TexcHere("data_hdr.copy failure");
9487bae9
AR
589 }
590
06684a9b 591 debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
4475555f 592 " from " << e.mem_obj->memCache.offset << " in " << page);
06684a9b 593
4475555f
AR
594 slice.size += copied;
595 e.mem_obj->memCache.offset += copied;
76ba3c8a 596 anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
9487bae9 597}
7f6748c8 598
06684a9b
AR
599/// finds a slot and a free page to fill or throws
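/// Note: free slots travel as 1-based Ipc::Mem::PageId numbers while slice IDs
/// are 0-based, hence the +1/-1 conversions here and in noteFreeMapSlice().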
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

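/// recycles the page of a freed slice: returns it to the shared page pool and
/// the free-slot index, or hands it directly to the reserveSapForWriting()
/// caller currently waiting inside map->purgeOne()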
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

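/// incrementally copies freshly received entry data into the shared cache;
/// the caching decision is made (and remembered) on the first call for an entry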
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

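/// breaks the association between the local entry and its shared cache slot:
/// aborts an in-progress shared write (notifying others via transientsAbandon())
/// or simply closes the read lock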
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
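/// (e.g., assuming 32 KiB shared memory pages, a 256 MB cache_mem allows
/// 256*1024/32 = 8192 entries, since each entry needs at least one page)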
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                 entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}