/*
 * DEBUG: section 20    Storage Manager
 *
 */
5
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h" /* XXX: who should broadcast and when? */
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_LIMITS_H
#include <limits>
#endif
23
24
/// name of the shared memory segment that backs the Transients map
static const char *MapLabel = "transients_map";
27
28
29Transients::Transients(): map(NULL)
30{
9a9954ba
AR
31}
32
33Transients::~Transients()
34{
35 delete map;
36}
37
38void
39Transients::init()
40{
41 const int64_t entryLimit = EntryLimit();
42 if (entryLimit <= 0)
43 return; // no SMP support or a misconfiguration
44
45 Must(!map);
46 map = new TransientsMap(MapLabel);
47 map->cleaner = this;
48}
49
50void
51Transients::getStats(StoreInfoStats &stats) const
52{
53#if TRANSIENT_STATS_SUPPORTED
54 const size_t pageSize = Ipc::Mem::PageSize();
55
56 stats.mem.shared = true;
57 stats.mem.capacity =
58 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
59 stats.mem.size =
60 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
61 stats.mem.count = currentCount();
62#endif
63}
64
65void
66Transients::stat(StoreEntry &e) const
67{
68 storeAppendPrintf(&e, "\n\nTransient Objects\n");
69
70 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
71 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
72 currentSize() / 1024.0,
73 Math::doublePercent(currentSize(), maxSize()));
74
75 if (map) {
76 const int limit = map->entryLimit();
77 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
78 if (limit > 0) {
79 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
80 currentCount(), (100.0 * currentCount() / limit));
81 }
82 }
83}
84
85void
86Transients::maintain()
87{
88}
89
90uint64_t
91Transients::minSize() const
92{
93 return 0; // XXX: irrelevant, but Store parent forces us to implement this
94}
95
96uint64_t
97Transients::maxSize() const
98{
99 // Squid currently does not limit the total size of all transient objects
100 return std::numeric_limits<uint64_t>::max();
101}
102
103uint64_t
104Transients::currentSize() const
105{
106 // TODO: we do not get enough information to calculate this
107 // StoreEntry should update associated stores when its size changes
108 return 0;
109}
110
111uint64_t
112Transients::currentCount() const
113{
114 return map ? map->entryCount() : 0;
115}
116
117int64_t
118Transients::maxObjectSize() const
119{
120 // Squid currently does not limit the size of a transient object
121 return std::numeric_limits<uint64_t>::max();
122}
123
124void
125Transients::reference(StoreEntry &)
126{
127}
128
129bool
130Transients::dereference(StoreEntry &, bool)
131{
132 // no need to keep e in the global store_table for us; we have our own map
133 return false;
134}
135
136int
137Transients::callback()
138{
139 return 0;
140}
141
142StoreSearch *
143Transients::search(String const, HttpRequest *)
144{
145 fatal("not implemented");
146 return NULL;
147}
148
149StoreEntry *
150Transients::get(const cache_key *key)
151{
152 if (!map)
153 return NULL;
154
155 sfileno index;
1bfe9ade
AR
156 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
157 if (!anchor)
9a9954ba
AR
158 return NULL;
159
1bfe9ade
AR
160 // Without a writer, either the response has been cached already or we will
161 // get stuck waiting for it to be cached (because nobody will cache it).
162 if (!anchor->writing()) {
163 debugs(20, 5, "ignoring writer-less entry " << index);
164 } else if (StoreEntry *e = copyFromShm(index)) {
4475555f 165 return e; // keep read lock to receive updates from others
1bfe9ade 166 }
4475555f 167
1bfe9ade 168 // missing writer or loading failure
4475555f
AR
169 map->closeForReading(index);
170 return NULL;
171}
172
173StoreEntry *
174Transients::copyFromShm(const sfileno index)
175{
9a9954ba
AR
176 const TransientsMap::Extras &extras = map->extras(index);
177
178 // create a brand new store entry and initialize it with stored info
1bfe9ade 179 StoreEntry *e = storeCreatePureEntry(extras.url, extras.url,
9a9954ba 180 extras.reqFlags, extras.reqMethod);
9a9954ba
AR
181
182 assert(e->mem_obj);
183 e->mem_obj->method = extras.reqMethod;
99921d9d 184 e->mem_obj->xitTable.io = MemObject::ioReading;
4475555f 185 e->mem_obj->xitTable.index = index;
9a9954ba 186
9a9954ba 187 e->setPublicKey();
ce49546e 188 assert(e->key);
9a9954ba 189
4475555f
AR
190 // How do we know its SMP- and not just locally-collapsed? A worker gets
191 // locally-collapsed entries from the local store_table, not Transients.
192 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
193 e->mem_obj->smpCollapsed = true;
194
9a9954ba
AR
195 return e;
196}
197
198void
199Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
200{
201 // XXX: not needed but Store parent forces us to implement this
202 fatal("Transients::get(key,callback,data) should not be called");
203}
204
205void
99921d9d 206Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
9a9954ba
AR
207 const HttpRequestMethod &reqMethod)
208{
209 assert(e);
4475555f
AR
210 assert(e->mem_obj);
211 assert(e->mem_obj->xitTable.index < 0);
9a9954ba
AR
212
213 if (!map) {
214 debugs(20, 5, "No map to add " << *e);
215 return;
216 }
217
218 sfileno index = 0;
219 Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
220 if (!slot) {
4475555f 221 debugs(20, 5, "collision registering " << *e);
9a9954ba
AR
222 return;
223 }
224
225 try {
226 if (copyToShm(*e, index, reqFlags, reqMethod)) {
227 slot->set(*e);
99921d9d 228 e->mem_obj->xitTable.io = MemObject::ioWriting;
4475555f
AR
229 e->mem_obj->xitTable.index = index;
230 map->startAppending(index);
231 // keep write lock -- we will be supplying others with updates
9a9954ba
AR
232 return;
233 }
234 // fall through to the error handling code
235 }
236 catch (const std::exception &x) { // TODO: should we catch ... as well?
237 debugs(20, 2, "error keeping entry " << index <<
238 ' ' << *e << ": " << x.what());
239 // fall through to the error handling code
240 }
241
4475555f 242 map->abortWriting(index);
9a9954ba
AR
243}
244
245
246/// copies all relevant local data to shared memory
247bool
248Transients::copyToShm(const StoreEntry &e, const sfileno index,
249 const RequestFlags &reqFlags,
250 const HttpRequestMethod &reqMethod)
251{
252 TransientsMap::Extras &extras = map->extras(index);
253
254 const char *url = e.url();
255 const size_t urlLen = strlen(url);
256 Must(urlLen < sizeof(extras.url)); // we have space to store it all, plus 0
257 strncpy(extras.url, url, sizeof(extras.url));
258 extras.url[urlLen] = '\0';
259
260 extras.reqFlags = reqFlags;
9a9954ba
AR
261
262 Must(reqMethod != Http::METHOD_OTHER);
263 extras.reqMethod = reqMethod.id();
264
265 return true;
266}
267
268void
269Transients::noteFreeMapSlice(const sfileno sliceId)
270{
271 // TODO: we should probably find the entry being deleted and abort it
272}
273
4475555f
AR
274void
275Transients::abandon(const StoreEntry &e)
276{
277 assert(e.mem_obj && map);
278 map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
1bfe9ade 279 CollapsedForwarding::Broadcast(e);
4475555f
AR
280 // We do not unlock the entry now because the problem is most likely with
281 // the server resource rather than a specific cache writer, so we want to
282 // prevent other readers from collapsing requests for that resource.
283}
284
285bool
286Transients::abandoned(const StoreEntry &e) const
287{
288 assert(e.mem_obj);
289 return abandonedAt(e.mem_obj->xitTable.index);
290}
291
292/// whether an in-transit entry at the index is now abandoned by its writer
293bool
294Transients::abandonedAt(const sfileno index) const
295{
296 assert(map);
297 return map->readableEntry(index).waitingToBeFreed;
298}
299
99921d9d
AR
300void
301Transients::completeWriting(const StoreEntry &e)
302{
303 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
304 assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
305 map->closeForWriting(e.mem_obj->xitTable.index);
306 e.mem_obj->xitTable.index = -1;
307 e.mem_obj->xitTable.io = MemObject::ioDone;
308 }
309}
310
d366a7fa
AR
311int
312Transients::readers(const StoreEntry &e) const
313{
314 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
315 assert(map);
316 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
317 }
318 return 0;
319}
320
1bfe9ade
AR
321void
322Transients::markForUnlink(StoreEntry &e)
323{
324 if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
325 abandon(e);
326}
327
4475555f
AR
328void
329Transients::disconnect(MemObject &mem_obj)
330{
99921d9d
AR
331 if (mem_obj.xitTable.index >= 0) {
332 assert(map);
333 if (mem_obj.xitTable.io == MemObject::ioWriting) {
334 map->abortWriting(mem_obj.xitTable.index);
335 } else {
336 assert(mem_obj.xitTable.io == MemObject::ioReading);
337 map->closeForReading(mem_obj.xitTable.index);
338 }
339 mem_obj.xitTable.index = -1;
340 mem_obj.xitTable.io = MemObject::ioDone;
341 }
4475555f
AR
342}
343
9a9954ba
AR
344/// calculates maximum number of entries we need to store and map
345int64_t
346Transients::EntryLimit()
347{
348 // TODO: we should also check whether any SMP-aware caching is configured
349 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
350 return 0; // no SMP collapsed forwarding possible or needed
351
352 return 16*1024; // XXX: make configurable
353}
354
355/// initializes shared memory segment used by Transients
356class TransientsRr: public Ipc::Mem::RegisteredRunner
357{
358public:
359 /* RegisteredRunner API */
360 TransientsRr(): mapOwner(NULL) {}
361 virtual void run(const RunnerRegistry &);
362 virtual ~TransientsRr();
363
364protected:
365 virtual void create(const RunnerRegistry &);
366
367private:
368 TransientsMap::Owner *mapOwner;
369};
370
371RunnerRegistrationEntry(rrAfterConfig, TransientsRr);
372
373void TransientsRr::run(const RunnerRegistry &r)
374{
375 assert(Config.memShared.configured());
376 Ipc::Mem::RegisteredRunner::run(r);
377}
378
379void TransientsRr::create(const RunnerRegistry &)
380{
9a9954ba
AR
381 if (!Config.onoff.collapsed_forwarding)
382 return;
383
384 const int64_t entryLimit = Transients::EntryLimit();
9a9954ba
AR
385 if (entryLimit <= 0)
386 return; // no SMP configured or a misconfiguration
387
388 Must(!mapOwner);
389 mapOwner = TransientsMap::Init(MapLabel, entryLimit);
390}
391
392TransientsRr::~TransientsRr()
393{
394 delete mapOwner;
395}