]> git.ipfire.org Git - thirdparty/squid.git/blame - src/MemStore.cc
Implement Ipc::Mem::Level().
[thirdparty/squid.git] / src / MemStore.cc
CommitLineData
9487bae9
AR
1/*
2 * $Id$
3 *
4 * DEBUG: section 20 Memory Cache
5 *
6 */
7
8#include "config.h"
a4555399 9#include "base/RunnersRegistry.h"
9487bae9
AR
10#include "ipc/mem/Page.h"
11#include "ipc/mem/Pages.h"
12#include "MemObject.h"
13#include "MemStore.h"
14#include "HttpReply.h"
15
a4555399
AR
/// the path/name used for the shared memory segment(s) behind MemStore maps
static const char *ShmLabel = "cache_mem";

// XXX: support storage using more than one page per entry
20
a4555399
AR
21void
22MemStore::Init()
23{
24 const int64_t entryLimit = EntryLimit();
25 if (entryLimit <= 0)
26 return; // no memory cache configured or a misconfiguration
27
28 MemStoreMap *map = new MemStoreMap(ShmLabel, entryLimit);
29 delete map; // we just wanted to initialize shared memory segments
30}
9487bae9 31
39c1e1d9 32MemStore::MemStore(): map(NULL), cur_size(0)
9487bae9
AR
33{
34}
35
36MemStore::~MemStore()
37{
38 delete map;
39}
40
41void
a4555399
AR
42MemStore::init() {
43 const int64_t entryLimit = EntryLimit();
44 if (entryLimit <= 0)
45 return; // no memory cache configured or a misconfiguration
46
47 map = new MemStoreMap(ShmLabel);
48 map->cleaner = this;
9487bae9
AR
49}
50
51void
c4e688b7 52MemStore::stat(StoreEntry &e) const
9487bae9 53{
c4e688b7
AR
54 storeAppendPrintf(&e, "\n\nShared Memory Cache\n");
55
56 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);
57
58 if (map) {
59 const int limit = map->entryLimit();
60 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
61 if (limit > 0) {
39c1e1d9
DK
62 storeAppendPrintf(&e, "Current entries: %"PRId64" %.2f%%\n",
63 currentCount(), (100.0 * currentCount() / limit));
c4e688b7
AR
64
65 if (limit < 100) { // XXX: otherwise too expensive to count
66 Ipc::ReadWriteLockStats stats;
67 map->updateStats(stats);
68 stats.dump(e);
69 }
70 }
71 }
9487bae9
AR
72}
73
74void
75MemStore::maintain()
76{
77}
78
79uint64_t
80MemStore::minSize() const
81{
82 return 0; // XXX: irrelevant, but Store parent forces us to implement this
83}
84
85uint64_t
86MemStore::maxSize() const
87{
88 return 0; // XXX: make configurable
89}
90
39c1e1d9
DK
91uint64_t
92MemStore::currentSize() const
93{
94 return cur_size >> 10;
95}
96
97uint64_t
98MemStore::currentCount() const
99{
100 return map ? map->entryCount() : 0;
101}
102
9487bae9
AR
103void
104MemStore::updateSize(int64_t eSize, int sign)
105{
106 // XXX: irrelevant, but Store parent forces us to implement this
107 fatal("MemStore::updateSize should not be called");
108}
109
110void
111MemStore::reference(StoreEntry &)
112{
113}
114
115void
116MemStore::dereference(StoreEntry &)
117{
118}
119
120int
121MemStore::callback()
122{
123 return 0;
124}
125
126StoreSearch *
127MemStore::search(String const, HttpRequest *)
128{
129 fatal("not implemented");
130 return NULL;
131}
132
133StoreEntry *
134MemStore::get(const cache_key *key)
135{
136 if (!map)
137 return NULL;
138
139 // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
140 sfileno index;
141 const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
142 if (!slot)
143 return NULL;
144
145 const Ipc::StoreMapSlot::Basics &basics = slot->basics;
146 const MemStoreMap::Extras &extras = map->extras(index);
147
148 // create a brand new store entry and initialize it with stored info
149 StoreEntry *e = new StoreEntry();
150 e->lock_count = 0;
151
152 e->swap_file_sz = basics.swap_file_sz;
153 e->lastref = basics.lastref;
154 e->timestamp = basics.timestamp;
155 e->expires = basics.expires;
156 e->lastmod = basics.lastmod;
157 e->refcount = basics.refcount;
158 e->flags = basics.flags;
159
160 e->store_status = STORE_OK;
161 e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
162 //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
163 e->ping_status = PING_NONE;
164
165 EBIT_SET(e->flags, ENTRY_CACHABLE);
166 EBIT_CLR(e->flags, RELEASE_REQUEST);
167 EBIT_CLR(e->flags, KEY_PRIVATE);
168 EBIT_SET(e->flags, ENTRY_VALIDATED);
169
170 const bool copied = copyFromShm(*e, extras);
171
172 // we copied everything we could to local memory; no more need to lock
173 map->closeForReading(index);
174
175 if (copied) {
176 e->hashInsert(key);
177 return e;
178 }
179
180 debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
181 map->free(index); // do not let others into the same trap
182 return NULL;
183}
184
185void
186MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
187{
188 // XXX: not needed but Store parent forces us to implement this
189 fatal("MemStore::get(key,callback,data) should not be called");
190}
191
192bool
193MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
194{
195 const Ipc::Mem::PageId &page = extras.page;
196
197 StoreIOBuffer sourceBuf(extras.storedSize, 0,
198 static_cast<char*>(PagePointer(page)));
199
200 // XXX: We do not know the URLs yet, only the key, but we need to parse and
201 // store the response for the Root().get() callers to be happy because they
202 // expect IN_MEMORY entries to already have the response headers and body.
203 // At least one caller calls createMemObject() if there is not one, so
204 // we hide the true object until that happens (to avoid leaking TBD URLs).
205 e.createMemObject("TBD", "TBD");
206
207 // emulate the usual Store code but w/o inapplicable checks and callbacks:
208
209 // from store_client::readBody():
210 HttpReply *rep = (HttpReply *)e.getReply();
211 const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
212 if (!rep->parseCharBuf(sourceBuf.data, end)) {
213 debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
214 return false;
215 }
216 // local memory stores both headers and body
217 e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()
218
219 storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()
220
221 assert(e.mem_obj->data_hdr.write(sourceBuf)); // from MemObject::write()
222 const int64_t written = e.mem_obj->endOffset();
30204d23
AR
223 // we should write all because StoreEntry::write() never fails
224 assert(written >= 0 &&
225 static_cast<size_t>(written) == sourceBuf.length);
9487bae9
AR
226 // would be nice to call validLength() here, but it needs e.key
227
228 debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
229 " from " << page);
230
231 e.hideMemObject();
232
233 return true;
234}
235
236void
237MemStore::considerKeeping(StoreEntry &e)
238{
239 if (!e.memoryCachable()) {
240 debugs(20, 7, HERE << "Not memory cachable: " << e);
241 return; // cannot keep due to entry state or properties
242 }
243
244 assert(e.mem_obj);
245 if (!willFit(e.mem_obj->endOffset())) {
246 debugs(20, 5, HERE << "No mem-cache space for " << e);
247 return; // failed to free enough space
248 }
249
250 keep(e); // may still fail
251}
252
253bool
254MemStore::willFit(int64_t need)
255{
256 // TODO: obey configured maximum entry size (with page-based rounding)
30204d23 257 return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
9487bae9
AR
258}
259
260/// allocates map slot and calls copyToShm to store the entry in shared memory
261void
262MemStore::keep(StoreEntry &e)
263{
264 if (!map) {
265 debugs(20, 5, HERE << "No map to mem-cache " << e);
266 return;
267 }
268
269 sfileno index = 0;
270 Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
271 if (!slot) {
272 debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
273 return;
274 }
275
276 MemStoreMap::Extras &extras = map->extras(index);
277 if (copyToShm(e, extras)) {
278 slot->set(e);
279 map->closeForWriting(index, false);
280 } else {
281 map->abortIo(index);
282 }
283}
284
285/// uses mem_hdr::copy() to copy local data to shared memory
286bool
287MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
288{
289 Ipc::Mem::PageId page;
290 if (!Ipc::Mem::GetPage(page)) {
291 debugs(20, 5, HERE << "No mem-cache page for " << e);
292 return false; // GetPage is responsible for any cleanup on failures
293 }
294
295 const int64_t bufSize = Ipc::Mem::PageSize();
296 const int64_t eSize = e.mem_obj->endOffset();
297
298 StoreIOBuffer sharedSpace(bufSize, 0,
299 static_cast<char*>(PagePointer(page)));
300
301 // check that we kept everything or purge incomplete/sparse cached entry
302 const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
303 if (eSize != copied) {
304 debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
305 eSize << "!=" << copied);
306 // cleanup
307 PutPage(page);
308 return false;
309 }
310
311 debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
312 " in " << page);
313
39c1e1d9 314 cur_size += eSize;
9487bae9
AR
315 // remember storage location and size
316 extras.page = page;
317 extras.storedSize = copied;
318 return true;
319}
7f6748c8
AR
320
321void
322MemStore::cleanReadable(const sfileno fileno)
323{
324 Ipc::Mem::PutPage(map->extras(fileno).page);
39c1e1d9 325 cur_size -= map->extras(fileno).storedSize;
7f6748c8
AR
326}
327
a4555399
AR
328/// calculates maximum number of entries we need to store and map
329int64_t
330MemStore::EntryLimit()
331{
332 if (!Config.memMaxSize)
333 return 0; // no memory cache configured
334
335 // TODO: warn if we cannot support the configured maximum entry size
336 const int64_t entrySize = Ipc::Mem::PageSize(); // for now
337 const int64_t entryLimit = Config.memMaxSize / entrySize;
338 // TODO: warn if we cannot cache at least one item (misconfiguration)
339 return entryLimit;
340}
341
342
343/// initializes shared memory segments used by MemStore
344class MemStoreRr: public RegisteredRunner
345{
346public:
347 /* RegisteredRunner API */
348 virtual void run(const RunnerRegistry &);
349 // TODO: cleanup in destructor
350};
351
352RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);
353
354
355void MemStoreRr::run(const RunnerRegistry &)
356{
357 // XXX: restore if (!UsingSmp()) return;
358
359 if (IamMasterProcess())
360 MemStore::Init();
361}