/*
 * $Id$
 *
 * DEBUG: section 20 Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "protos.h"
#include "StoreStats.h"

/// shared memory segment path to use for MemStore maps
static const char *ShmLabel = "cache_mem";

// XXX: support storage using more than one page per entry

MemStore::MemStore(): map(NULL), theCurrentSize(0)
{
}

MemStore::~MemStore()
{
    delete map;
}

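/// creates the shared memory map (if a memory cache is configured) and
/// cross-checks the disk-cache and mem-cache object size limits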
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    const int64_t diskMaxSize = Store::Root().maxObjectSize();
    const int64_t memMaxSize = maxObjectSize();
    if (diskMaxSize == -1) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is unlimited but mem-cache maximum object size is " <<
               memMaxSize / 1024.0 << " KB");
    } else if (diskMaxSize > memMaxSize) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is too large for mem-cache: " <<
               diskMaxSize / 1024.0 << " KB > " <<
               memMaxSize / 1024.0 << " KB");
    }

    map = new MemStoreMap(ShmLabel);
    map->cleaner = this;
}

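/// reports shared memory page capacity, usage, and entry count (Store API)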
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

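/// appends a human-readable shared memory cache summary to the given
/// cache manager report entry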
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return 0; // XXX: make configurable
}

uint64_t
MemStore::currentSize() const
{
    return theCurrentSize;
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return Ipc::Mem::PageSize();
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

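/// looks up the key in the shared memory map and, on a hit, rebuilds a
/// local StoreEntry from the stored slot basics and page contents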
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
    sfileno index;
    const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    const Ipc::StoreMapSlot::Basics &basics = slot->basics;
    const MemStoreMap::Extras &extras = map->extras(index);

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    e->swap_file_sz = basics.swap_file_sz;
    e->lastref = basics.lastref;
    e->timestamp = basics.timestamp;
    e->expires = basics.expires;
    e->lastmod = basics.lastmod;
    e->refcount = basics.refcount;
    e->flags = basics.flags;

    e->store_status = STORE_OK;
    e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
    //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
    e->ping_status = PING_NONE;

    EBIT_SET(e->flags, ENTRY_CACHABLE);
    EBIT_CLR(e->flags, RELEASE_REQUEST);
    EBIT_CLR(e->flags, KEY_PRIVATE);
    EBIT_SET(e->flags, ENTRY_VALIDATED);

    const bool copied = copyFromShm(*e, extras);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->free(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

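/// copies the stored response (headers and body) from the shared memory
/// page into the given local entry; returns false if the stored headers
/// cannot be parsed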
bool
MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
{
    const Ipc::Mem::PageId &page = extras.page;

    StoreIOBuffer sourceBuf(extras.storedSize, 0,
                            static_cast<char*>(PagePointer(page)));

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e.createMemObject("TBD", "TBD");

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    // from store_client::readBody():
    HttpReply *rep = (HttpReply *)e.getReply();
    const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
    if (!rep->parseCharBuf(sourceBuf.data, end)) {
        debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
        return false;
    }
    // local memory stores both headers and body
    e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()

    storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()

    // from MemObject::write(); keep the write itself outside assert() so
    // that the side effect is explicit
    const bool wroteAll = e.mem_obj->data_hdr.write(sourceBuf);
    assert(wroteAll);
    const int64_t written = e.mem_obj->endOffset();
    // we should write all because StoreEntry::write() never fails
    assert(written >= 0 &&
           static_cast<size_t>(written) == sourceBuf.length);
    // would be nice to call validLength() here, but it needs e.key

    debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
           " from " << page);

    e.hideMemObject();

    return true;
}

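/// whether the entry may be kept in the memory cache: checks entry
/// properties and its current/expected size against the configured limits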
bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }
    if (!willFit(ramSize)) {
        debugs(20, 5, HERE << "Won't fit max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to memory cache slot limit
    }

    return true;
}

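/// stores the entry in shared memory if it is cachable, complete, and
/// fully loaded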
void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

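/// whether the given number of bytes fits into a single shared memory page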
bool
MemStore::willFit(int64_t need) const
{
    return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
}

/// allocates map slot and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    MemStoreMap::Extras &extras = map->extras(index);
    if (copyToShm(e, extras)) {
        slot->set(e);
        map->closeForWriting(index, false);
    } else {
        map->abortIo(index);
    }
}

/// uses mem_hdr::copy() to copy local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
{
    Ipc::Mem::PageId page;
    if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
        debugs(20, 5, HERE << "No mem-cache page for " << e);
        return false; // GetPage is responsible for any cleanup on failures
    }

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t eSize = e.mem_obj->endOffset();

    StoreIOBuffer sharedSpace(bufSize, 0,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (eSize != copied) {
        debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
               eSize << "!=" << copied);
        // cleanup
        PutPage(page);
        return false;
    }

    debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
           " in " << page);

    theCurrentSize += Ipc::Mem::PageSize();
    // remember storage location and size
    extras.page = page;
    extras.storedSize = copied;
    return true;
}

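/// releases the shared memory page of a freed map entry; called by the map
/// because init() registered us as its cleaner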
void
MemStore::cleanReadable(const sfileno fileno)
{
    Ipc::Mem::PutPage(map->extras(fileno).page);
    theCurrentSize -= Ipc::Mem::PageSize();
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t entrySize = Ipc::Mem::PageSize(); // for now
    const int64_t entryLimit = Config.memMaxSize / entrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): owner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    MemStoreMap::Owner *owner;
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    Must(!owner);
    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }
    owner = MemStoreMap::Init(ShmLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete owner;
}