]> git.ipfire.org Git - thirdparty/squid.git/blame - src/Transients.cc
Do not use HERE in new or changed debugs() statements.
[thirdparty/squid.git] / src / Transients.cc
CommitLineData
9a9954ba
AR
1/*
2 * DEBUG: section 20 Storage Manager
3 *
4 */
5
6#include "squid.h"
e6d2c263 7#include "CollapsedForwarding.h"
9a9954ba
AR
8#include "base/RunnersRegistry.h"
9#include "HttpReply.h"
10#include "ipc/mem/Page.h"
11#include "ipc/mem/Pages.h"
12#include "MemObject.h"
13#include "Transients.h"
14#include "mime_header.h"
15#include "SquidConfig.h"
16#include "SquidMath.h"
17#include "StoreStats.h"
18#include "tools.h"
19
20#if HAVE_LIMITS_H
21#include <limits>
22#endif
23
24
/// shared memory segment path to use for Transients maps
static const char *MapLabel = "transients_map";
27
28
6919be24 29Transients::Transients(): map(NULL), locals(NULL)
9a9954ba 30{
9a9954ba
AR
31}
32
33Transients::~Transients()
34{
35 delete map;
6919be24 36 delete locals;
9a9954ba
AR
37}
38
39void
40Transients::init()
41{
42 const int64_t entryLimit = EntryLimit();
43 if (entryLimit <= 0)
44 return; // no SMP support or a misconfiguration
45
46 Must(!map);
47 map = new TransientsMap(MapLabel);
48 map->cleaner = this;
6919be24 49
8bcca0f8 50 locals = new Locals(entryLimit, 0);
9a9954ba
AR
51}
52
53void
54Transients::getStats(StoreInfoStats &stats) const
55{
56#if TRANSIENT_STATS_SUPPORTED
57 const size_t pageSize = Ipc::Mem::PageSize();
58
59 stats.mem.shared = true;
60 stats.mem.capacity =
61 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
62 stats.mem.size =
63 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
64 stats.mem.count = currentCount();
65#endif
66}
67
68void
69Transients::stat(StoreEntry &e) const
70{
71 storeAppendPrintf(&e, "\n\nTransient Objects\n");
72
73 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
74 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
75 currentSize() / 1024.0,
76 Math::doublePercent(currentSize(), maxSize()));
77
78 if (map) {
79 const int limit = map->entryLimit();
80 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
81 if (limit > 0) {
82 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
83 currentCount(), (100.0 * currentCount() / limit));
84 }
85 }
86}
87
88void
89Transients::maintain()
90{
91}
92
93uint64_t
94Transients::minSize() const
95{
96 return 0; // XXX: irrelevant, but Store parent forces us to implement this
97}
98
99uint64_t
100Transients::maxSize() const
101{
102 // Squid currently does not limit the total size of all transient objects
103 return std::numeric_limits<uint64_t>::max();
104}
105
106uint64_t
107Transients::currentSize() const
108{
109 // TODO: we do not get enough information to calculate this
110 // StoreEntry should update associated stores when its size changes
111 return 0;
112}
113
114uint64_t
115Transients::currentCount() const
116{
117 return map ? map->entryCount() : 0;
118}
119
120int64_t
121Transients::maxObjectSize() const
122{
123 // Squid currently does not limit the size of a transient object
124 return std::numeric_limits<uint64_t>::max();
125}
126
127void
128Transients::reference(StoreEntry &)
129{
130}
131
132bool
133Transients::dereference(StoreEntry &, bool)
134{
135 // no need to keep e in the global store_table for us; we have our own map
136 return false;
137}
138
139int
140Transients::callback()
141{
142 return 0;
143}
144
145StoreSearch *
146Transients::search(String const, HttpRequest *)
147{
148 fatal("not implemented");
149 return NULL;
150}
151
152StoreEntry *
153Transients::get(const cache_key *key)
154{
155 if (!map)
156 return NULL;
157
158 sfileno index;
1bfe9ade
AR
159 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
160 if (!anchor)
9a9954ba
AR
161 return NULL;
162
6919be24
AR
163 // If we already have a local entry, the store_table should have found it.
164 // Since it did not, the local entry key must have changed from public to
165 // private. We still need to keep the private entry around for syncing as
166 // its clients depend on it, but we should not allow new clients to join.
167 if (StoreEntry *oldE = locals->at(index)) {
168 debugs(20, 3, "not joining private " << *oldE);
169 assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
170 } else if (StoreEntry *newE = copyFromShm(index)) {
171 return newE; // keep read lock to receive updates from others
1bfe9ade 172 }
4475555f 173
6919be24 174 // private entry or loading failure
4475555f
AR
175 map->closeForReading(index);
176 return NULL;
177}
178
179StoreEntry *
180Transients::copyFromShm(const sfileno index)
181{
9a9954ba
AR
182 const TransientsMap::Extras &extras = map->extras(index);
183
184 // create a brand new store entry and initialize it with stored info
1bfe9ade 185 StoreEntry *e = storeCreatePureEntry(extras.url, extras.url,
9a9954ba 186 extras.reqFlags, extras.reqMethod);
9a9954ba
AR
187
188 assert(e->mem_obj);
189 e->mem_obj->method = extras.reqMethod;
99921d9d 190 e->mem_obj->xitTable.io = MemObject::ioReading;
4475555f 191 e->mem_obj->xitTable.index = index;
9a9954ba 192
9a9954ba 193 e->setPublicKey();
ce49546e 194 assert(e->key);
9a9954ba 195
4475555f
AR
196 // How do we know its SMP- and not just locally-collapsed? A worker gets
197 // locally-collapsed entries from the local store_table, not Transients.
198 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
199 e->mem_obj->smpCollapsed = true;
200
6919be24
AR
201 assert(!locals->at(index));
202 // We do not lock e because we do not want to prevent its destruction;
203 // e is tied to us via mem_obj so we will know when it is destructed.
204 locals->at(index) = e;
9a9954ba
AR
205 return e;
206}
207
208void
209Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
210{
211 // XXX: not needed but Store parent forces us to implement this
212 fatal("Transients::get(key,callback,data) should not be called");
213}
214
6919be24
AR
215StoreEntry *
216Transients::findCollapsed(const sfileno index)
217{
218 if (!map)
219 return NULL;
220
221 if (StoreEntry *oldE = locals->at(index)) {
222 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
223 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
224 return oldE;
225 }
226
227 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
228 return NULL;
229}
230
9a9954ba 231void
99921d9d 232Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
9a9954ba
AR
233 const HttpRequestMethod &reqMethod)
234{
235 assert(e);
4475555f
AR
236 assert(e->mem_obj);
237 assert(e->mem_obj->xitTable.index < 0);
9a9954ba
AR
238
239 if (!map) {
240 debugs(20, 5, "No map to add " << *e);
241 return;
242 }
243
244 sfileno index = 0;
245 Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
246 if (!slot) {
4475555f 247 debugs(20, 5, "collision registering " << *e);
9a9954ba
AR
248 return;
249 }
250
251 try {
252 if (copyToShm(*e, index, reqFlags, reqMethod)) {
253 slot->set(*e);
99921d9d 254 e->mem_obj->xitTable.io = MemObject::ioWriting;
4475555f
AR
255 e->mem_obj->xitTable.index = index;
256 map->startAppending(index);
257 // keep write lock -- we will be supplying others with updates
9a9954ba
AR
258 return;
259 }
260 // fall through to the error handling code
261 }
262 catch (const std::exception &x) { // TODO: should we catch ... as well?
263 debugs(20, 2, "error keeping entry " << index <<
264 ' ' << *e << ": " << x.what());
265 // fall through to the error handling code
266 }
267
4475555f 268 map->abortWriting(index);
9a9954ba
AR
269}
270
271
272/// copies all relevant local data to shared memory
273bool
274Transients::copyToShm(const StoreEntry &e, const sfileno index,
275 const RequestFlags &reqFlags,
276 const HttpRequestMethod &reqMethod)
277{
278 TransientsMap::Extras &extras = map->extras(index);
279
280 const char *url = e.url();
281 const size_t urlLen = strlen(url);
282 Must(urlLen < sizeof(extras.url)); // we have space to store it all, plus 0
283 strncpy(extras.url, url, sizeof(extras.url));
284 extras.url[urlLen] = '\0';
285
286 extras.reqFlags = reqFlags;
9a9954ba
AR
287
288 Must(reqMethod != Http::METHOD_OTHER);
289 extras.reqMethod = reqMethod.id();
290
291 return true;
292}
293
294void
295Transients::noteFreeMapSlice(const sfileno sliceId)
296{
297 // TODO: we should probably find the entry being deleted and abort it
298}
299
4475555f
AR
300void
301Transients::abandon(const StoreEntry &e)
302{
303 assert(e.mem_obj && map);
304 map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
1bfe9ade 305 CollapsedForwarding::Broadcast(e);
4475555f
AR
306 // We do not unlock the entry now because the problem is most likely with
307 // the server resource rather than a specific cache writer, so we want to
308 // prevent other readers from collapsing requests for that resource.
309}
310
311bool
312Transients::abandoned(const StoreEntry &e) const
313{
314 assert(e.mem_obj);
315 return abandonedAt(e.mem_obj->xitTable.index);
316}
317
318/// whether an in-transit entry at the index is now abandoned by its writer
319bool
320Transients::abandonedAt(const sfileno index) const
321{
322 assert(map);
323 return map->readableEntry(index).waitingToBeFreed;
324}
325
99921d9d
AR
326void
327Transients::completeWriting(const StoreEntry &e)
328{
329 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
330 assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
6919be24
AR
331 // there will be no more updates from us after this, so we must prevent
332 // future readers from joining
333 map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
99921d9d
AR
334 map->closeForWriting(e.mem_obj->xitTable.index);
335 e.mem_obj->xitTable.index = -1;
336 e.mem_obj->xitTable.io = MemObject::ioDone;
337 }
338}
339
d366a7fa
AR
340int
341Transients::readers(const StoreEntry &e) const
342{
343 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
344 assert(map);
345 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
346 }
347 return 0;
348}
349
1bfe9ade
AR
350void
351Transients::markForUnlink(StoreEntry &e)
352{
353 if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
354 abandon(e);
355}
356
4475555f
AR
357void
358Transients::disconnect(MemObject &mem_obj)
359{
99921d9d
AR
360 if (mem_obj.xitTable.index >= 0) {
361 assert(map);
362 if (mem_obj.xitTable.io == MemObject::ioWriting) {
363 map->abortWriting(mem_obj.xitTable.index);
364 } else {
365 assert(mem_obj.xitTable.io == MemObject::ioReading);
366 map->closeForReading(mem_obj.xitTable.index);
367 }
6919be24 368 locals->at(mem_obj.xitTable.index) = NULL;
99921d9d
AR
369 mem_obj.xitTable.index = -1;
370 mem_obj.xitTable.io = MemObject::ioDone;
371 }
4475555f
AR
372}
373
9a9954ba
AR
374/// calculates maximum number of entries we need to store and map
375int64_t
376Transients::EntryLimit()
377{
378 // TODO: we should also check whether any SMP-aware caching is configured
379 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
380 return 0; // no SMP collapsed forwarding possible or needed
381
e6d2c263 382 return 16*1024; // TODO: make configurable?
9a9954ba
AR
383}
384
385/// initializes shared memory segment used by Transients
386class TransientsRr: public Ipc::Mem::RegisteredRunner
387{
388public:
389 /* RegisteredRunner API */
390 TransientsRr(): mapOwner(NULL) {}
391 virtual void run(const RunnerRegistry &);
392 virtual ~TransientsRr();
393
394protected:
395 virtual void create(const RunnerRegistry &);
396
397private:
398 TransientsMap::Owner *mapOwner;
399};
400
401RunnerRegistrationEntry(rrAfterConfig, TransientsRr);
402
403void TransientsRr::run(const RunnerRegistry &r)
404{
405 assert(Config.memShared.configured());
406 Ipc::Mem::RegisteredRunner::run(r);
407}
408
409void TransientsRr::create(const RunnerRegistry &)
410{
9a9954ba
AR
411 if (!Config.onoff.collapsed_forwarding)
412 return;
413
414 const int64_t entryLimit = Transients::EntryLimit();
9a9954ba
AR
415 if (entryLimit <= 0)
416 return; // no SMP configured or a misconfiguration
417
418 Must(!mapOwner);
419 mapOwner = TransientsMap::Init(MapLabel, entryLimit);
420}
421
422TransientsRr::~TransientsRr()
423{
424 delete mapOwner;
425}