]> git.ipfire.org Git - thirdparty/squid.git/blob - src/Transients.cc
Merged from trunk rev.13515
[thirdparty/squid.git] / src / Transients.cc
1 /*
2 * DEBUG: section 20 Storage Manager
3 *
4 */
5
6 #include "squid.h"
7 #include "base/RunnersRegistry.h"
8 #include "CollapsedForwarding.h"
9 #include "HttpReply.h"
10 #include "ipc/mem/Page.h"
11 #include "ipc/mem/Pages.h"
12 #include "MemObject.h"
13 #include "mime_header.h"
14 #include "SquidConfig.h"
15 #include "SquidMath.h"
16 #include "StoreStats.h"
17 #include "tools.h"
18 #include "Transients.h"
19
20 #include <limits>
21
/// shared memory segment path to use for the Transients entry map
static const SBuf MapLabel("transients_map");
/// shared memory segment path to use for Transients map extras
/// (per-entry URL, request flags, and method; see TransientsMapExtras)
static const char *ExtrasLabel = "transients_ex";
26
/// starts with no shared map and no local entry index; both are
/// allocated later, in init(), if SMP collapsed forwarding is enabled
Transients::Transients(): map(NULL), locals(NULL)
{
}
30
Transients::~Transients()
{
    // frees our map wrapper and local entry index only; the underlying
    // shared memory segments are owned (and freed) by TransientsRr
    delete map;
    delete locals;
}
36
37 void
38 Transients::init()
39 {
40 const int64_t entryLimit = EntryLimit();
41 if (entryLimit <= 0)
42 return; // no SMP support or a misconfiguration
43
44 Must(!map);
45 map = new TransientsMap(MapLabel);
46 map->cleaner = this;
47
48 extras = shm_old(TransientsMapExtras)(ExtrasLabel);
49
50 locals = new Locals(entryLimit, 0);
51 }
52
/// reports shared memory usage for cachemgr; compiled out (reporting
/// nothing) unless TRANSIENT_STATS_SUPPORTED is defined
void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#endif
}
67
68 void
69 Transients::stat(StoreEntry &e) const
70 {
71 storeAppendPrintf(&e, "\n\nTransient Objects\n");
72
73 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
74 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
75 currentSize() / 1024.0,
76 Math::doublePercent(currentSize(), maxSize()));
77
78 if (map) {
79 const int limit = map->entryLimit();
80 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
81 if (limit > 0) {
82 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
83 currentCount(), (100.0 * currentCount() / limit));
84 }
85 }
86 }
87
/// Store API: periodic maintenance hook; Transients needs none
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
93
/// Store API: minimum store size; meaningless for Transients
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
99
/// Store API: total capacity; effectively unlimited for Transients
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
106
/// Store API: current total size; always reported as zero (see TODO)
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
114
115 uint64_t
116 Transients::currentCount() const
117 {
118 return map ? map->entryCount() : 0;
119 }
120
121 int64_t
122 Transients::maxObjectSize() const
123 {
124 // Squid currently does not limit the size of a transient object
125 return std::numeric_limits<uint64_t>::max();
126 }
127
/// Store API: replacement-policy touch; a no-op for Transients
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
133
/// Store API: returns false to tell the caller that Transients does not
/// require keeping the entry in the global store_table
bool
Transients::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
140
/// Store API: event-dispatch hook; Transients has no pending events
int
Transients::callback()
{
    return 0;
}
146
/// Store API: iteration over entries is not supported by Transients
StoreSearch *
Transients::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}
153
/// Looks up a shared transient entry by public key. On success, returns a
/// locally instantiated StoreEntry and keeps the map slot read-locked so
/// we keep receiving updates from the writer. Returns NULL (releasing the
/// read lock) for unknown, private, or unloadable entries.
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL; // SMP collapsed forwarding disabled

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL; // no readable entry with this key

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index);
    return NULL;
}
180
181 StoreEntry *
182 Transients::copyFromShm(const sfileno index)
183 {
184 const TransientsMapExtras::Item &extra = extras->items[index];
185
186 // create a brand new store entry and initialize it with stored info
187 StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
188 extra.reqFlags, extra.reqMethod);
189
190 assert(e->mem_obj);
191 e->mem_obj->method = extra.reqMethod;
192 e->mem_obj->xitTable.io = MemObject::ioReading;
193 e->mem_obj->xitTable.index = index;
194
195 e->setPublicKey();
196 assert(e->key);
197
198 // How do we know its SMP- and not just locally-collapsed? A worker gets
199 // locally-collapsed entries from the local store_table, not Transients.
200 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
201 e->mem_obj->smpCollapsed = true;
202
203 assert(!locals->at(index));
204 // We do not lock e because we do not want to prevent its destruction;
205 // e is tied to us via mem_obj so we will know when it is destructed.
206 locals->at(index) = e;
207 return e;
208 }
209
/// Store API: asynchronous lookup; unsupported and must never be called
void
Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("Transients::get(key,callback,data) should not be called");
}
216
217 StoreEntry *
218 Transients::findCollapsed(const sfileno index)
219 {
220 if (!map)
221 return NULL;
222
223 if (StoreEntry *oldE = locals->at(index)) {
224 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
225 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
226 return oldE;
227 }
228
229 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
230 return NULL;
231 }
232
/// Registers the entry in the shared map so that other workers can find
/// it and collapse onto it. On success, keeps the map slot write-locked
/// (we will be supplying readers with updates). On collision or copy
/// failure, the entry simply stays out of the transients map.
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0); // not already transient

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return;
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index);
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    // release the write lock and mark the failed slot for cleanup
    map->abortWriting(index);
}
271
272 /// copies all relevant local data to shared memory
273 bool
274 Transients::copyToShm(const StoreEntry &e, const sfileno index,
275 const RequestFlags &reqFlags,
276 const HttpRequestMethod &reqMethod)
277 {
278 TransientsMapExtras::Item &extra = extras->items[index];
279
280 const char *url = e.url();
281 const size_t urlLen = strlen(url);
282 Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
283 strncpy(extra.url, url, sizeof(extra.url));
284 extra.url[urlLen] = '\0';
285
286 extra.reqFlags = reqFlags;
287
288 Must(reqMethod != Http::METHOD_OTHER);
289 extra.reqMethod = reqMethod.id();
290
291 return true;
292 }
293
/// Ipc::StoreMapCleaner API: called when the map frees a slice;
/// currently a no-op (see TODO)
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
299
/// marks the shared slot of a still-locked entry as waiting-to-be-freed
/// and broadcasts the change so other workers notice the abandonment
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e);
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
310
311 bool
312 Transients::abandoned(const StoreEntry &e) const
313 {
314 assert(e.mem_obj);
315 return abandonedAt(e.mem_obj->xitTable.index);
316 }
317
/// whether an in-transit entry at the index is now abandoned by its writer
/// (i.e., its shared map slot was marked waiting-to-be-freed by abandon())
bool
Transients::abandonedAt(const sfileno index) const
{
    assert(map);
    return map->readableEntry(index).waitingToBeFreed;
}
325
/// Ends our writer role for the given transient entry: marks the slot so
/// no new readers join, releases the write lock, and detaches the entry
/// from the transients map (index reset, I/O state set to done).
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1;
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}
339
340 int
341 Transients::readers(const StoreEntry &e) const
342 {
343 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
344 assert(map);
345 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
346 }
347 return 0;
348 }
349
/// Store API: only the writer may abandon its transient entry here;
/// entries we are merely reading are left untouched
void
Transients::markForUnlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
        abandon(e);
}
356
/// Detaches a transient entry from the shared map when its MemObject goes
/// away: releases our read or write lock (aborting the write if we were
/// the writer), forgets the local entry pointer, and resets the xitTable
/// state to "not transient".
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map);
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL;
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}
373
374 /// calculates maximum number of entries we need to store and map
375 int64_t
376 Transients::EntryLimit()
377 {
378 // TODO: we should also check whether any SMP-aware caching is configured
379 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
380 return 0; // no SMP collapsed forwarding possible or needed
381
382 return 16*1024; // TODO: make configurable?
383 }
384
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner; ///< owner of the shared entry map segment
    Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner; ///< owner of the extras segment
};

RunnerRegistrationEntry(TransientsRr);
403
void
TransientsRr::useConfig()
{
    // shared memory use must already be decided by the configuration code
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
410
/// creates the shared transients map and extras segments, but only when
/// collapsed forwarding is enabled and EntryLimit() allows any entries
void
TransientsRr::create()
{
    if (!Config.onoff.collapsed_forwarding)
        return;

    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner); // create() must not be called twice
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit);
}
426
TransientsRr::~TransientsRr()
{
    // destroy owners in reverse order of their creation in create()
    delete extrasOwner;
    delete mapOwner;
}