// Source: git.ipfire.org Git mirror — thirdparty/squid.git, src/MemStore.cc
// Commit subject: "Warn if shared memory cache is enabled in non-SMP mode."
1 /*
2 * $Id$
3 *
4 * DEBUG: section 20 Memory Cache
5 *
6 */
7
8 #include "config.h"
9 #include "base/RunnersRegistry.h"
10 #include "ipc/mem/Page.h"
11 #include "ipc/mem/Pages.h"
12 #include "MemObject.h"
13 #include "MemStore.h"
14 #include "HttpReply.h"
15
16 /// shared memory segment path to use for MemStore maps
17 static const char *ShmLabel = "cache_mem";
18
19 // XXX: support storage using more than one page per entry
20
21 MemStore::MemStore(): map(NULL), theCurrentSize(0)
22 {
23 }
24
25 MemStore::~MemStore()
26 {
27 delete map;
28 }
29
30 void
31 MemStore::init()
32 {
33 const int64_t entryLimit = EntryLimit();
34 if (entryLimit <= 0)
35 return; // no memory cache configured or a misconfiguration
36
37 const int64_t diskMaxSize = Store::Root().maxObjectSize();
38 const int64_t memMaxSize = maxObjectSize();
39 if (diskMaxSize == -1) {
40 debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
41 "is unlimited but mem-cache maximum object size is " <<
42 memMaxSize / 1024.0 << " KB");
43 } else if (diskMaxSize > memMaxSize) {
44 debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
45 "is too large for mem-cache: " <<
46 diskMaxSize / 1024.0 << " KB > " <<
47 memMaxSize / 1024.0 << " KB");
48 }
49
50 map = new MemStoreMap(ShmLabel);
51 map->cleaner = this;
52 }
53
54 void
55 MemStore::stat(StoreEntry &e) const
56 {
57 storeAppendPrintf(&e, "\n\nShared Memory Cache\n");
58
59 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);
60
61 if (map) {
62 const int limit = map->entryLimit();
63 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
64 if (limit > 0) {
65 storeAppendPrintf(&e, "Current entries: %"PRId64" %.2f%%\n",
66 currentCount(), (100.0 * currentCount() / limit));
67
68 if (limit < 100) { // XXX: otherwise too expensive to count
69 Ipc::ReadWriteLockStats stats;
70 map->updateStats(stats);
71 stats.dump(e);
72 }
73 }
74 }
75 }
76
77 void
78 MemStore::maintain()
79 {
80 }
81
82 uint64_t
83 MemStore::minSize() const
84 {
85 return 0; // XXX: irrelevant, but Store parent forces us to implement this
86 }
87
88 uint64_t
89 MemStore::maxSize() const
90 {
91 return 0; // XXX: make configurable
92 }
93
94 uint64_t
95 MemStore::currentSize() const
96 {
97 return theCurrentSize;
98 }
99
100 uint64_t
101 MemStore::currentCount() const
102 {
103 return map ? map->entryCount() : 0;
104 }
105
106 int64_t
107 MemStore::maxObjectSize() const
108 {
109 return Ipc::Mem::PageSize();
110 }
111
112 void
113 MemStore::reference(StoreEntry &)
114 {
115 }
116
117 bool
118 MemStore::dereference(StoreEntry &)
119 {
120 // no need to keep e in the global store_table for us; we have our own map
121 return false;
122 }
123
124 int
125 MemStore::callback()
126 {
127 return 0;
128 }
129
130 StoreSearch *
131 MemStore::search(String const, HttpRequest *)
132 {
133 fatal("not implemented");
134 return NULL;
135 }
136
137 StoreEntry *
138 MemStore::get(const cache_key *key)
139 {
140 if (!map)
141 return NULL;
142
143 // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
144 sfileno index;
145 const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
146 if (!slot)
147 return NULL;
148
149 const Ipc::StoreMapSlot::Basics &basics = slot->basics;
150 const MemStoreMap::Extras &extras = map->extras(index);
151
152 // create a brand new store entry and initialize it with stored info
153 StoreEntry *e = new StoreEntry();
154 e->lock_count = 0;
155
156 e->swap_file_sz = basics.swap_file_sz;
157 e->lastref = basics.lastref;
158 e->timestamp = basics.timestamp;
159 e->expires = basics.expires;
160 e->lastmod = basics.lastmod;
161 e->refcount = basics.refcount;
162 e->flags = basics.flags;
163
164 e->store_status = STORE_OK;
165 e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
166 //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
167 e->ping_status = PING_NONE;
168
169 EBIT_SET(e->flags, ENTRY_CACHABLE);
170 EBIT_CLR(e->flags, RELEASE_REQUEST);
171 EBIT_CLR(e->flags, KEY_PRIVATE);
172 EBIT_SET(e->flags, ENTRY_VALIDATED);
173
174 const bool copied = copyFromShm(*e, extras);
175
176 // we copied everything we could to local memory; no more need to lock
177 map->closeForReading(index);
178
179 if (copied) {
180 e->hashInsert(key);
181 return e;
182 }
183
184 debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
185 map->free(index); // do not let others into the same trap
186 return NULL;
187 }
188
189 void
190 MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
191 {
192 // XXX: not needed but Store parent forces us to implement this
193 fatal("MemStore::get(key,callback,data) should not be called");
194 }
195
196 bool
197 MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
198 {
199 const Ipc::Mem::PageId &page = extras.page;
200
201 StoreIOBuffer sourceBuf(extras.storedSize, 0,
202 static_cast<char*>(PagePointer(page)));
203
204 // XXX: We do not know the URLs yet, only the key, but we need to parse and
205 // store the response for the Root().get() callers to be happy because they
206 // expect IN_MEMORY entries to already have the response headers and body.
207 // At least one caller calls createMemObject() if there is not one, so
208 // we hide the true object until that happens (to avoid leaking TBD URLs).
209 e.createMemObject("TBD", "TBD");
210
211 // emulate the usual Store code but w/o inapplicable checks and callbacks:
212
213 // from store_client::readBody():
214 HttpReply *rep = (HttpReply *)e.getReply();
215 const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
216 if (!rep->parseCharBuf(sourceBuf.data, end)) {
217 debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
218 return false;
219 }
220 // local memory stores both headers and body
221 e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()
222
223 storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()
224
225 assert(e.mem_obj->data_hdr.write(sourceBuf)); // from MemObject::write()
226 const int64_t written = e.mem_obj->endOffset();
227 // we should write all because StoreEntry::write() never fails
228 assert(written >= 0 &&
229 static_cast<size_t>(written) == sourceBuf.length);
230 // would be nice to call validLength() here, but it needs e.key
231
232 debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
233 " from " << page);
234
235 e.hideMemObject();
236
237 return true;
238 }
239
240 void
241 MemStore::considerKeeping(StoreEntry &e)
242 {
243 if (!e.memoryCachable()) {
244 debugs(20, 7, HERE << "Not memory cachable: " << e);
245 return; // cannot keep due to entry state or properties
246 }
247
248 assert(e.mem_obj);
249 if (!willFit(e.mem_obj->endOffset())) {
250 debugs(20, 5, HERE << "No mem-cache space for " << e);
251 return; // failed to free enough space
252 }
253
254 keep(e); // may still fail
255 }
256
257 bool
258 MemStore::willFit(int64_t need)
259 {
260 // TODO: obey configured maximum entry size (with page-based rounding)
261 return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
262 }
263
264 /// allocates map slot and calls copyToShm to store the entry in shared memory
265 void
266 MemStore::keep(StoreEntry &e)
267 {
268 if (!map) {
269 debugs(20, 5, HERE << "No map to mem-cache " << e);
270 return;
271 }
272
273 sfileno index = 0;
274 Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
275 if (!slot) {
276 debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
277 return;
278 }
279
280 MemStoreMap::Extras &extras = map->extras(index);
281 if (copyToShm(e, extras)) {
282 slot->set(e);
283 map->closeForWriting(index, false);
284 } else {
285 map->abortIo(index);
286 }
287 }
288
289 /// uses mem_hdr::copy() to copy local data to shared memory
290 bool
291 MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
292 {
293 Ipc::Mem::PageId page;
294 if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
295 debugs(20, 5, HERE << "No mem-cache page for " << e);
296 return false; // GetPage is responsible for any cleanup on failures
297 }
298
299 const int64_t bufSize = Ipc::Mem::PageSize();
300 const int64_t eSize = e.mem_obj->endOffset();
301
302 StoreIOBuffer sharedSpace(bufSize, 0,
303 static_cast<char*>(PagePointer(page)));
304
305 // check that we kept everything or purge incomplete/sparse cached entry
306 const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
307 if (eSize != copied) {
308 debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
309 eSize << "!=" << copied);
310 // cleanup
311 PutPage(page);
312 return false;
313 }
314
315 debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
316 " in " << page);
317
318 theCurrentSize += Ipc::Mem::PageSize();
319 // remember storage location and size
320 extras.page = page;
321 extras.storedSize = copied;
322 return true;
323 }
324
325 void
326 MemStore::cleanReadable(const sfileno fileno)
327 {
328 Ipc::Mem::PutPage(map->extras(fileno).page);
329 theCurrentSize -= Ipc::Mem::PageSize();
330 }
331
332 /// calculates maximum number of entries we need to store and map
333 int64_t
334 MemStore::EntryLimit()
335 {
336 if (!Config.memMaxSize)
337 return 0; // no memory cache configured
338
339 const int64_t entrySize = Ipc::Mem::PageSize(); // for now
340 const int64_t entryLimit = Config.memMaxSize / entrySize;
341 return entryLimit;
342 }
343
344
345 /// initializes shared memory segments used by MemStore
346 class MemStoreRr: public RegisteredRunner
347 {
348 public:
349 /* RegisteredRunner API */
350 MemStoreRr(): owner(NULL) {}
351 virtual void run(const RunnerRegistry &);
352 virtual ~MemStoreRr();
353
354 private:
355 MemStoreMap::Owner *owner;
356 };
357
358 RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);
359
360
361 void MemStoreRr::run(const RunnerRegistry &)
362 {
363 // decide whether to use a shared memory cache if the user did not specify
364 if (!Config.memShared.configured()) {
365 Config.memShared.configure(AtomicOperationsSupported &&
366 Ipc::Mem::Segment::Enabled() && UsingSmp() &&
367 Config.memMaxSize > 0);
368 } else if (Config.memShared && !AtomicOperationsSupported) {
369 // bail if the user wants shared memory cache but we cannot support it
370 fatal("memory_cache_shared is on, but no support for atomic operations detected");
371 } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
372 fatal("memory_cache_shared is on, but no support for shared memory detected");
373 } else if (Config.memShared && !UsingSmp()) {
374 debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
375 " a single worker is running");
376 }
377
378 if (!Config.memShared)
379 return;
380
381 if (IamMasterProcess()) {
382 Must(!owner);
383 const int64_t entryLimit = MemStore::EntryLimit();
384 if (entryLimit <= 0)
385 return; // no memory cache configured or a misconfiguration
386 owner = MemStoreMap::Init(ShmLabel, entryLimit);
387 }
388 }
389
390 MemStoreRr::~MemStoreRr()
391 {
392 delete owner;
393 }