/*
 * $Id$
 *
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "protos.h"
#include "StoreStats.h"

/// shared memory segment path to use for MemStore maps
static const char *ShmLabel = "cache_mem";

// XXX: support storage using more than one page per entry

MemStore::MemStore(): map(NULL), theCurrentSize(0)
{
}

MemStore::~MemStore()
{
    delete map;
}

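/// prepares the shared memory cache map if one is configured; also warns
/// when the disk cache accepts objects too large for the memory cache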
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    const int64_t diskMaxSize = Store::Root().maxObjectSize();
    const int64_t memMaxSize = maxObjectSize();
    if (diskMaxSize == -1) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is unlimited but mem-cache maximum object size is " <<
               memMaxSize / 1024.0 << " KB");
    } else if (diskMaxSize > memMaxSize) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is too large for mem-cache: " <<
               diskMaxSize / 1024.0 << " KB > " <<
               memMaxSize / 1024.0 << " KB");
    }

    map = new MemStoreMap(ShmLabel);
    map->cleaner = this;
}

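/// reports overall shared memory cache capacity, usage, and entry count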
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

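/// appends a human-readable shared memory cache report to the given entry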
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return 0; // XXX: make configurable
}

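/// bytes currently occupied by mem-cached entries (counted in whole pages)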
uint64_t
MemStore::currentSize() const
{
    return theCurrentSize;
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

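/// the largest reply size we can cache: a single shared memory page, for now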
int64_t
MemStore::maxObjectSize() const
{
    return Ipc::Mem::PageSize();
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

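/// constructs a local StoreEntry from a shared memory cache hit;
/// returns NULL on a miss or when the cached copy cannot be loaded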
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
    sfileno index;
    const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    const Ipc::StoreMapSlot::Basics &basics = slot->basics;
    const MemStoreMap::Extras &extras = map->extras(index);

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    e->swap_file_sz = basics.swap_file_sz;
    e->lastref = basics.lastref;
    e->timestamp = basics.timestamp;
    e->expires = basics.expires;
    e->lastmod = basics.lastmod;
    e->refcount = basics.refcount;
    e->flags = basics.flags;

    e->store_status = STORE_OK;
    e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
    //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
    e->ping_status = PING_NONE;

    EBIT_SET(e->flags, ENTRY_CACHABLE);
    EBIT_CLR(e->flags, RELEASE_REQUEST);
    EBIT_CLR(e->flags, KEY_PRIVATE);
    EBIT_SET(e->flags, ENTRY_VALIDATED);

    const bool copied = copyFromShm(*e, extras);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->free(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

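/// copies the cached page into a fresh local MemObject and parses the stored
/// reply headers; returns false if the stored headers cannot be parsed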
bool
MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
{
    const Ipc::Mem::PageId &page = extras.page;

    StoreIOBuffer sourceBuf(extras.storedSize, 0,
                            static_cast<char*>(PagePointer(page)));

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e.createMemObject("TBD", "TBD");

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    // from store_client::readBody():
    HttpReply *rep = (HttpReply *)e.getReply();
    const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
    if (!rep->parseCharBuf(sourceBuf.data, end)) {
        debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
        return false;
    }
    // local memory stores both headers and body
    e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()

    storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()

    // from MemObject::write(); call write() outside assert() to avoid
    // relying on side effects inside an assertion
    const bool writeOk = e.mem_obj->data_hdr.write(sourceBuf);
    assert(writeOk);
    const int64_t written = e.mem_obj->endOffset();
    // we should write all because StoreEntry::write() never fails
    assert(written >= 0 &&
           static_cast<size_t>(written) == sourceBuf.length);
    // would be nice to call validLength() here, but it needs e.key

    debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
           " from " << page);

    e.hideMemObject();

    return true;
}

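/// whether the entry may be admitted to the memory cache, based on entry
/// properties and the configured and page-size limits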
bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!willFit(ramSize)) {
        debugs(20, 5, HERE << "Won't fit max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to memory cache slot limit
    }

    return true;
}

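/// attempts to cache a complete, fully loaded entry in shared memory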
void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

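/// whether the given number of bytes fits into a single shared memory page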
bool
MemStore::willFit(int64_t need) const
{
    return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
}

/// allocates map slot and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    MemStoreMap::Extras &extras = map->extras(index);
    if (copyToShm(e, extras)) {
        slot->set(e);
        map->closeForWriting(index, false);
    } else {
        map->abortIo(index);
    }
}

/// uses mem_hdr::copy() to copy local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
{
    Ipc::Mem::PageId page;
    if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
        debugs(20, 5, HERE << "No mem-cache page for " << e);
        return false; // GetPage is responsible for any cleanup on failures
    }

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t eSize = e.mem_obj->endOffset();

    StoreIOBuffer sharedSpace(bufSize, 0,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (eSize != copied) {
        debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
               eSize << "!=" << copied);
        // cleanup
        PutPage(page);
        return false;
    }

    debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
           " in " << page);

    theCurrentSize += Ipc::Mem::PageSize();
    // remember storage location and size
    extras.page = page;
    extras.storedSize = copied;
    return true;
}

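/// Ipc::StoreMapCleaner API: releases the shared memory page of a freed map
/// slot and adjusts the accounted cache size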
void
MemStore::cleanReadable(const sfileno fileno)
{
    Ipc::Mem::PutPage(map->extras(fileno).page);
    theCurrentSize -= Ipc::Mem::PageSize();
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t entrySize = Ipc::Mem::PageSize(); // for now
    const int64_t entryLimit = Config.memMaxSize / entrySize;
    return entryLimit;
}

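// Worked example (assuming 32 KB shared memory pages): a 256 MB cache_mem
// yields 256 MB / 32 KB = 8192 single-page cache entries.
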
/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): owner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    MemStoreMap::Owner *owner;
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

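/// creates the shared MemStoreMap segment, warning when cache_mem is too
/// small to hold even a single page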
void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    Must(!owner);
    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }
    owner = MemStoreMap::Init(ShmLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete owner;
}