]>
Commit | Line | Data |
---|---|---|
9487bae9 | 1 | /* |
4ac4a490 | 2 | * Copyright (C) 1996-2017 The Squid Software Foundation and contributors |
9487bae9 | 3 | * |
bbc27441 AJ |
4 | * Squid software is distributed under GPLv2+ license and includes |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
9487bae9 AR |
7 | */ |
8 | ||
bbc27441 AJ |
9 | /* DEBUG: section 20 Memory Cache */ |
10 | ||
f7f3304a | 11 | #include "squid.h" |
a4555399 | 12 | #include "base/RunnersRegistry.h" |
ce49546e | 13 | #include "CollapsedForwarding.h" |
582c2af2 | 14 | #include "HttpReply.h" |
9487bae9 AR |
15 | #include "ipc/mem/Page.h" |
16 | #include "ipc/mem/Pages.h" | |
17 | #include "MemObject.h" | |
18 | #include "MemStore.h" | |
b6149797 | 19 | #include "mime_header.h" |
4d5904f7 | 20 | #include "SquidConfig.h" |
06684a9b | 21 | #include "SquidMath.h" |
93bc1434 | 22 | #include "StoreStats.h" |
5bed43d6 | 23 | #include "tools.h" |
9487bae9 | 24 | |
/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
9487bae9 | 37 | |
/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    virtual void append(const char *aBuf, int aSize) override;
    virtual void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    /// copies the entire remaining buffer to shared memory, slice by slice
    void copyToShm();
    /// copies at most one slice worth of buffer to shared memory
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store; ///< the cache this writer appends to
    const sfileno fileNo; ///< the map entry being written

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
74 | ||
/* ShmWriter */

/// \param aFirstSlice the slice to continue appending at, or -1 to let the
///        first append() allocate a fresh slice chain start
ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice), // appending continues from the supplied slice (if any)
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry); // a writer is useless without an entry to update
}
90 | ||
/* Packable API: appends a raw buffer to the entry in shared memory */
void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf); // nested/concurrent appends are not supported
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(buf);
        bufWritten = 0;
        copyToShm();
    }
    // clear per-call state; these members are only valid during an append
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}
106 | ||
/* Packable API: formats into a temporary SBuf, then appends the result */
void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
#if defined(VA_COPY)
    // work on a copy so the caller's va_list remains usable after we consume it
    va_list apCopy;
    VA_COPY(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
#else
    vaBuf.vappendf(fmt, ap);
#endif
    append(vaBuf.rawContent(), vaBuf.length());
}
121 | ||
/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    // a caller-supplied firstSlice implies lastSlice was set from it (see ctor)
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice; // remember where our content chain started
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}
139 | ||
/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten; // bytes still to store
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize; // position inside the page
    // copy as much as fits into the remainder of this page
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
167 | ||
168 | /* MemStore */ | |
169 | ||
06684a9b | 170 | MemStore::MemStore(): map(NULL), lastWritingSlice(-1) |
9487bae9 AR |
171 | { |
172 | } | |
173 | ||
MemStore::~MemStore()
{
    delete map; // allocated in init(); other members clean up on their own
}
178 | ||
/// attaches to the shared-memory segments and creates the in-memory cache map;
/// a no-op when the memory cache is not configured (or misconfigured)
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    // shm_old: attach to segments presumably created earlier by their owner
    // (e.g., during master startup) -- TODO confirm against the runners
    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map); // init() must not be called twice
    map = new MemStoreMap(MapLabel);
    map->cleaner = this; // we get noteFreeMapSlice() callbacks
}
209 | ||
93bc1434 AR |
210 | void |
211 | MemStore::getStats(StoreInfoStats &stats) const | |
212 | { | |
213 | const size_t pageSize = Ipc::Mem::PageSize(); | |
214 | ||
215 | stats.mem.shared = true; | |
216 | stats.mem.capacity = | |
217 | Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize; | |
218 | stats.mem.size = | |
219 | Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize; | |
220 | stats.mem.count = currentCount(); | |
221 | } | |
222 | ||
/// appends a human-readable report about the shared memory cache to entry e
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            // the page pool may be shared with others; only report "used"
            // when the free count is meaningful for our slot limit
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
260 | ||
void
MemStore::maintain()
{
    // nothing to do: space is reclaimed on demand (see reserveSapForWriting)
}
265 | ||
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
271 | ||
uint64_t
MemStore::maxSize() const
{
    // presumably the configured cache_mem total, in bytes -- TODO confirm units
    return Config.memMaxSize;
}
277 | ||
39c1e1d9 DK |
278 | uint64_t |
279 | MemStore::currentSize() const | |
280 | { | |
06684a9b | 281 | return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * |
9d4e9cfb | 282 | Ipc::Mem::PageSize(); |
39c1e1d9 DK |
283 | } |
284 | ||
285 | uint64_t | |
286 | MemStore::currentCount() const | |
287 | { | |
288 | return map ? map->entryCount() : 0; | |
289 | } | |
290 | ||
int64_t
MemStore::maxObjectSize() const
{
    // a single object can never exceed the total memory cache size
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
296 | ||
void
MemStore::reference(StoreEntry &)
{
    // nothing to do: we do not track per-entry references here
}
301 | ||
bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
308 | ||
9487bae9 AR |
309 | StoreEntry * |
310 | MemStore::get(const cache_key *key) | |
311 | { | |
312 | if (!map) | |
313 | return NULL; | |
314 | ||
9487bae9 | 315 | sfileno index; |
10dc0fe6 | 316 | const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index); |
9487bae9 AR |
317 | if (!slot) |
318 | return NULL; | |
319 | ||
9487bae9 AR |
320 | // create a brand new store entry and initialize it with stored info |
321 | StoreEntry *e = new StoreEntry(); | |
9487bae9 | 322 | |
ce49546e AR |
323 | // XXX: We do not know the URLs yet, only the key, but we need to parse and |
324 | // store the response for the Root().get() callers to be happy because they | |
325 | // expect IN_MEMORY entries to already have the response headers and body. | |
c877c0bc | 326 | e->makeMemObject(); |
9487bae9 | 327 | |
ce49546e | 328 | anchorEntry(*e, index, *slot); |
9487bae9 | 329 | |
06684a9b | 330 | const bool copied = copyFromShm(*e, index, *slot); |
9487bae9 | 331 | |
9487bae9 AR |
332 | if (copied) { |
333 | e->hashInsert(key); | |
334 | return e; | |
335 | } | |
336 | ||
337 | debugs(20, 3, HERE << "mem-loading failed; freeing " << index); | |
10dc0fe6 | 338 | map->freeEntry(index); // do not let others into the same trap |
9487bae9 AR |
339 | return NULL; |
340 | } | |
341 | ||
/// replaces the cached entry headers with those of updatedE, if possible;
/// failures to even start the update are logged and leave the map unchanged
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update); // release locks and discard partial work
    }
}
361 | ||
/// performs the header-update transaction started by updateHeaders():
/// writes fresh headers into a new slice chain, splices the same-slice
/// payload remainder after them, and commits the update; throws on failure
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const HttpReply *rawReply = update.entry->getReply();
    Must(rawReply);
    const HttpReply &reply = *rawReply;
    const uint64_t staleHdrSz = reply.hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    reply.packHeadersInto(&writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    // adjust the total size for the difference between stale and fresh headers
    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}
404 | ||
/// attaches a collapsed-forwarding entry to its shared-map anchor (if found);
/// returns true when anchored, setting inSync to the copyFromShm result
bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
                reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}
421 | ||
/// refreshes a previously anchored collapsed entry from shared memory;
/// returns false when the entry cannot be brought in sync
bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}
439 | ||
e6d2c263 | 440 | /// updates collapsed entry after its anchor has been located |
ce49546e AR |
441 | bool |
442 | MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor) | |
443 | { | |
e6d2c263 | 444 | collapsed.swap_file_sz = anchor.basics.swap_file_sz; |
ce49546e | 445 | const bool copied = copyFromShm(collapsed, index, anchor); |
e6d2c263 | 446 | return copied; |
ce49546e AR |
447 | } |
448 | ||
/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    // mirror the shared metadata into the local entry
    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastModified(basics.lastmod);
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        // the writer is still appending; readers must poll for more data
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    // a shared entry is, by definition, public and validated
    EBIT_CLR(e.flags, RELEASE_REQUEST);
    e.clearPrivate();
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}
484 | ||
/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0; // e.mem_obj offset where the current slice starts
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
            // otherwise, re-read the same (grown) slice on the next iteration
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        // the writer has not finished yet; report a successful partial load
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
561 | ||
/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < Http::Message::psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == Http::Message::psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false; // the caller will free the damaged entry
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}
599 | ||
/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj); // ensured by the memoryCachable() check above

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
653 | ||
/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true); // let the entry know it is being mem-cached
    return true;
}
676 | ||
/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0); // startCaching() must have succeeded earlier
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                                          e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice; // remember the first slice we wrote
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
718 | ||
/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize; // within the page
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    // keep the shared size in sync with how much we have stored so far
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
7f6748c8 | 748 | |
/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0); // otherwise the caller should have given us it
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page; // remember the slice's page
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            // this slice is full; follow the chain or grow it
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}
785 | ||
786 | /// safely returns a previously allocated memory page for the given entry slice | |
787 | Ipc::Mem::PageId | |
788 | MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId) | |
789 | { | |
790 | Must(extras); | |
791 | Must(sliceId >= 0); | |
792 | Ipc::Mem::PageId page = extras->items[sliceId].page; | |
793 | Must(page); | |
794 | return page; | |
795 | } | |
796 | ||
06684a9b AR |
797 | /// finds a slot and a free page to fill or throws |
798 | sfileno | |
799 | MemStore::reserveSapForWriting(Ipc::Mem::PageId &page) | |
7f6748c8 | 800 | { |
06684a9b AR |
801 | Ipc::Mem::PageId slot; |
802 | if (freeSlots->pop(slot)) { | |
803 | debugs(20, 5, "got a previously free slot: " << slot); | |
804 | ||
805 | if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) { | |
806 | debugs(20, 5, "and got a previously free page: " << page); | |
807 | return slot.number - 1; | |
808 | } else { | |
809 | debugs(20, 3, "but there is no free page, returning " << slot); | |
810 | freeSlots->push(slot); | |
811 | } | |
812 | } | |
9d4e9cfb | 813 | |
06684a9b AR |
814 | // catch free slots delivered to noteFreeMapSlice() |
815 | assert(!waitingFor); | |
816 | waitingFor.slot = &slot; | |
817 | waitingFor.page = &page; | |
818 | if (map->purgeOne()) { | |
819 | assert(!waitingFor); // noteFreeMapSlice() should have cleared it | |
820 | assert(slot.set()); | |
821 | assert(page.set()); | |
822 | debugs(20, 5, "got previously busy " << slot << " and " << page); | |
823 | return slot.number - 1; | |
824 | } | |
825 | assert(waitingFor.slot == &slot && waitingFor.page == &page); | |
826 | waitingFor.slot = NULL; | |
827 | waitingFor.page = NULL; | |
828 | ||
829 | debugs(47, 3, "cannot get a slice; entries: " << map->entryCount()); | |
830 | throw TexcHere("ran out of mem-cache slots"); | |
831 | } | |
832 | ||
/// StoreMap callback: the given slice (and the shared memory page recorded in
/// its extras) has been freed; either recycle the slot/page pair or deliver it
/// to the reserveSapForWriting() caller registered via waitingFor
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    // the extras record that remembers which shared page backs this slice
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    // reconstruct the free-slot ID corresponding to this slice
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1; // slot numbers are 1-based; slice IDs are 0-based
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        // hand the pair directly to the waiting reserveSapForWriting() caller
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId(); // zero extras before others can reuse the slice
    }
}
854 | ||
ce49546e | 855 | void |
4475555f | 856 | MemStore::write(StoreEntry &e) |
ce49546e AR |
857 | { |
858 | assert(e.mem_obj); | |
4475555f AR |
859 | |
860 | debugs(20, 7, "entry " << e); | |
861 | ||
862 | switch (e.mem_obj->memCache.io) { | |
99921d9d | 863 | case MemObject::ioUndecided: |
4475555f | 864 | if (!shouldCache(e) || !startCaching(e)) { |
99921d9d | 865 | e.mem_obj->memCache.io = MemObject::ioDone; |
0cdcf3d7 | 866 | e.memOutDecision(false); |
4475555f AR |
867 | return; |
868 | } | |
869 | break; | |
9d4e9cfb | 870 | |
99921d9d AR |
871 | case MemObject::ioDone: |
872 | case MemObject::ioReading: | |
4475555f AR |
873 | return; // we should not write in all of the above cases |
874 | ||
99921d9d | 875 | case MemObject::ioWriting: |
4475555f AR |
876 | break; // already decided to write and still writing |
877 | } | |
878 | ||
879 | try { | |
880 | copyToShm(e); | |
881 | if (e.store_status == STORE_OK) // done receiving new content | |
882 | completeWriting(e); | |
99921d9d AR |
883 | else |
884 | CollapsedForwarding::Broadcast(e); | |
4475555f | 885 | return; |
9d4e9cfb | 886 | } catch (const std::exception &x) { // TODO: should we catch ... as well? |
4475555f AR |
887 | debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what()); |
888 | // fall through to the error handling code | |
889 | } | |
890 | ||
29c56e41 | 891 | disconnect(e); |
4475555f AR |
892 | } |
893 | ||
894 | void | |
895 | MemStore::completeWriting(StoreEntry &e) | |
896 | { | |
897 | assert(e.mem_obj); | |
898 | const int32_t index = e.mem_obj->memCache.index; | |
899 | assert(index >= 0); | |
900 | assert(map); | |
901 | ||
902 | debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e); | |
903 | ||
904 | e.mem_obj->memCache.index = -1; | |
99921d9d | 905 | e.mem_obj->memCache.io = MemObject::ioDone; |
4475555f | 906 | map->closeForWriting(index, false); |
99921d9d AR |
907 | |
908 | CollapsedForwarding::Broadcast(e); // before we close our transient entry! | |
909 | Store::Root().transientsCompleteWriting(e); | |
4475555f AR |
910 | } |
911 | ||
1bfe9ade AR |
912 | void |
913 | MemStore::markForUnlink(StoreEntry &e) | |
914 | { | |
915 | assert(e.mem_obj); | |
916 | if (e.mem_obj->memCache.index >= 0) | |
917 | map->freeEntry(e.mem_obj->memCache.index); | |
918 | } | |
919 | ||
4475555f AR |
920 | void |
921 | MemStore::unlink(StoreEntry &e) | |
922 | { | |
e41a207f | 923 | if (e.mem_obj && e.mem_obj->memCache.index >= 0) { |
c877c0bc | 924 | map->freeEntry(e.mem_obj->memCache.index); |
29c56e41 | 925 | disconnect(e); |
5b675efa | 926 | } else if (map) { |
29c56e41 | 927 | // the entry may have been loaded and then disconnected from the cache |
ce49546e AR |
928 | map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key)); |
929 | } | |
9d4e9cfb | 930 | |
4475555f | 931 | e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right? |
ce49546e AR |
932 | } |
933 | ||
/// Detaches the entry from its shared memory cache slot, undoing any
/// in-progress writing or reading lock; a no-op for unattached entries.
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            // we were writing: release the writer lock and mark the attempt dead
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            // the only other attached state is reading; release the reader lock
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}
953 | ||
a4555399 AR |
954 | /// calculates maximum number of entries we need to store and map |
955 | int64_t | |
956 | MemStore::EntryLimit() | |
957 | { | |
45e8762c | 958 | if (!Config.memShared || !Config.memMaxSize) |
a4555399 AR |
959 | return 0; // no memory cache configured |
960 | ||
06684a9b AR |
961 | const int64_t minEntrySize = Ipc::Mem::PageSize(); |
962 | const int64_t entryLimit = Config.memMaxSize / minEntrySize; | |
a4555399 AR |
963 | return entryLimit; |
964 | } | |
965 | ||
/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    // the three Owner objects below manage the shared memory segments
    // created by create() and are destroyed in ~MemStoreRr()
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

// self-registers a MemStoreRr instance with the runners registry
RunnerRegistrationEntry(MemStoreRr);
ea2cdeb6 | 990 | |
void
MemStoreRr::claimMemoryNeeds()
{
    // tell the shared page pool how many cache pages we may need so that
    // the overall shared memory page limit can account for them
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}
996 | ||
21b7990f AR |
997 | void |
998 | MemStoreRr::finalizeConfig() | |
a4555399 | 999 | { |
57af1e3f AR |
1000 | // decide whether to use a shared memory cache if the user did not specify |
1001 | if (!Config.memShared.configured()) { | |
75777642 | 1002 | Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() && |
9199139f | 1003 | Config.memMaxSize > 0); |
9199139f | 1004 | } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) { |
c975f532 | 1005 | fatal("memory_cache_shared is on, but no support for shared memory detected"); |
53bbccec DK |
1006 | } else if (Config.memShared && !UsingSmp()) { |
1007 | debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only" | |
1008 | " a single worker is running"); | |
57af1e3f | 1009 | } |
45e8762c AR |
1010 | } |
1011 | ||
void
MemStoreRr::useConfig()
{
    // finalizeConfig() must have decided memory_cache_shared by now
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
1018 | ||
21b7990f AR |
1019 | void |
1020 | MemStoreRr::create() | |
4404f1c5 | 1021 | { |
57af1e3f | 1022 | if (!Config.memShared) |
60be8b2d | 1023 | return; |
a4555399 | 1024 | |
4404f1c5 | 1025 | const int64_t entryLimit = MemStore::EntryLimit(); |
ea2cdeb6 DK |
1026 | if (entryLimit <= 0) { |
1027 | if (Config.memMaxSize > 0) { | |
1028 | debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" | |
1029 | << (Config.memMaxSize / 1024.0) << " KB), should be >= " << | |
1030 | (Ipc::Mem::PageSize() / 1024.0) << " KB"); | |
1031 | } | |
4404f1c5 | 1032 | return; // no memory cache configured or a misconfiguration |
ea2cdeb6 | 1033 | } |
06684a9b AR |
1034 | |
1035 | Must(!spaceOwner); | |
1036 | spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId, | |
636b913c | 1037 | entryLimit, 0); |
06684a9b AR |
1038 | Must(!mapOwner); |
1039 | mapOwner = MemStoreMap::Init(MapLabel, entryLimit); | |
1860fbac AR |
1040 | Must(!extrasOwner); |
1041 | extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit); | |
a4555399 | 1042 | } |
c011f9bc DK |
1043 | |
1044 | MemStoreRr::~MemStoreRr() | |
1045 | { | |
1860fbac | 1046 | delete extrasOwner; |
06684a9b AR |
1047 | delete mapOwner; |
1048 | delete spaceOwner; | |
c011f9bc | 1049 | } |
f53969cc | 1050 |