]> git.ipfire.org Git - thirdparty/squid.git/blob - src/Transients.cc
Store API and layout polishing. No functionality changes intended.
[thirdparty/squid.git] / src / Transients.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "base/RunnersRegistry.h"
13 #include "CollapsedForwarding.h"
14 #include "HttpReply.h"
15 #include "ipc/mem/Page.h"
16 #include "ipc/mem/Pages.h"
17 #include "MemObject.h"
18 #include "mime_header.h"
19 #include "SquidConfig.h"
20 #include "SquidMath.h"
21 #include "StoreStats.h"
22 #include "tools.h"
23 #include "Transients.h"
24
25 #include <limits>
26
27 /// shared memory segment path to use for Transients map
28 static const SBuf MapLabel("transients_map");
29 /// shared memory segment path to use for Transients map extras
30 static const char *ExtrasLabel = "transients_ex";
31
32 Transients::Transients(): map(NULL), locals(NULL)
33 {
34 }
35
Transients::~Transients()
{
    // both were allocated by init(); locals holds non-owning StoreEntry
    // pointers, so deleting the container does not destroy the entries
    delete map;
    delete locals;
}
41
42 void
43 Transients::init()
44 {
45 const int64_t entryLimit = EntryLimit();
46 if (entryLimit <= 0)
47 return; // no SMP support or a misconfiguration
48
49 Must(!map);
50 map = new TransientsMap(MapLabel);
51 map->cleaner = this;
52
53 extras = shm_old(TransientsMapExtras)(ExtrasLabel);
54
55 locals = new Locals(entryLimit, 0);
56 }
57
58 void
59 Transients::getStats(StoreInfoStats &stats) const
60 {
61 #if TRANSIENT_STATS_SUPPORTED
62 const size_t pageSize = Ipc::Mem::PageSize();
63
64 stats.mem.shared = true;
65 stats.mem.capacity =
66 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
67 stats.mem.size =
68 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
69 stats.mem.count = currentCount();
70 #endif
71 }
72
73 void
74 Transients::stat(StoreEntry &e) const
75 {
76 storeAppendPrintf(&e, "\n\nTransient Objects\n");
77
78 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
79 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
80 currentSize() / 1024.0,
81 Math::doublePercent(currentSize(), maxSize()));
82
83 if (map) {
84 const int limit = map->entryLimit();
85 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
86 if (limit > 0) {
87 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
88 currentCount(), (100.0 * currentCount() / limit));
89 }
90 }
91 }
92
/// Store API: periodic upkeep; a no-op here because transient entries
/// are removed explicitly by their writers/readers, not lazily collected
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
98
/// Store API: minimum store size; meaningless for transient objects
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
104
105 uint64_t
106 Transients::maxSize() const
107 {
108 // Squid currently does not limit the total size of all transient objects
109 return std::numeric_limits<uint64_t>::max();
110 }
111
/// Store API: total size of currently stored objects (not yet tracked)
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
119
120 uint64_t
121 Transients::currentCount() const
122 {
123 return map ? map->entryCount() : 0;
124 }
125
126 int64_t
127 Transients::maxObjectSize() const
128 {
129 // Squid currently does not limit the size of a transient object
130 return std::numeric_limits<uint64_t>::max();
131 }
132
/// Store API: notes entry use for replacement policies; a no-op here
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
138
/// Store API: returns whether the entry should stay in the global
/// store_table on our behalf; we track entries in our own map instead
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
145
146 StoreEntry *
147 Transients::get(const cache_key *key)
148 {
149 if (!map)
150 return NULL;
151
152 sfileno index;
153 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
154 if (!anchor)
155 return NULL;
156
157 // If we already have a local entry, the store_table should have found it.
158 // Since it did not, the local entry key must have changed from public to
159 // private. We still need to keep the private entry around for syncing as
160 // its clients depend on it, but we should not allow new clients to join.
161 if (StoreEntry *oldE = locals->at(index)) {
162 debugs(20, 3, "not joining private " << *oldE);
163 assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
164 } else if (StoreEntry *newE = copyFromShm(index)) {
165 return newE; // keep read lock to receive updates from others
166 }
167
168 // private entry or loading failure
169 map->closeForReading(index);
170 return NULL;
171 }
172
173 StoreEntry *
174 Transients::copyFromShm(const sfileno index)
175 {
176 const TransientsMapExtras::Item &extra = extras->items[index];
177
178 // create a brand new store entry and initialize it with stored info
179 StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
180 extra.reqFlags, extra.reqMethod);
181
182 assert(e->mem_obj);
183 e->mem_obj->method = extra.reqMethod;
184 e->mem_obj->xitTable.io = MemObject::ioReading;
185 e->mem_obj->xitTable.index = index;
186
187 e->setPublicKey();
188 assert(e->key);
189
190 // How do we know its SMP- and not just locally-collapsed? A worker gets
191 // locally-collapsed entries from the local store_table, not Transients.
192 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
193 e->mem_obj->smpCollapsed = true;
194
195 assert(!locals->at(index));
196 // We do not lock e because we do not want to prevent its destruction;
197 // e is tied to us via mem_obj so we will know when it is destructed.
198 locals->at(index) = e;
199 return e;
200 }
201
202 StoreEntry *
203 Transients::findCollapsed(const sfileno index)
204 {
205 if (!map)
206 return NULL;
207
208 if (StoreEntry *oldE = locals->at(index)) {
209 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
210 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
211 return oldE;
212 }
213
214 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
215 return NULL;
216 }
217
218 void
219 Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
220 const HttpRequestMethod &reqMethod)
221 {
222 assert(e);
223 assert(e->mem_obj);
224 assert(e->mem_obj->xitTable.index < 0);
225
226 if (!map) {
227 debugs(20, 5, "No map to add " << *e);
228 return;
229 }
230
231 sfileno index = 0;
232 Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
233 if (!slot) {
234 debugs(20, 5, "collision registering " << *e);
235 return;
236 }
237
238 try {
239 if (copyToShm(*e, index, reqFlags, reqMethod)) {
240 slot->set(*e);
241 e->mem_obj->xitTable.io = MemObject::ioWriting;
242 e->mem_obj->xitTable.index = index;
243 map->startAppending(index);
244 // keep write lock -- we will be supplying others with updates
245 return;
246 }
247 // fall through to the error handling code
248 } catch (const std::exception &x) { // TODO: should we catch ... as well?
249 debugs(20, 2, "error keeping entry " << index <<
250 ' ' << *e << ": " << x.what());
251 // fall through to the error handling code
252 }
253
254 map->abortWriting(index);
255 }
256
257 /// copies all relevant local data to shared memory
258 bool
259 Transients::copyToShm(const StoreEntry &e, const sfileno index,
260 const RequestFlags &reqFlags,
261 const HttpRequestMethod &reqMethod)
262 {
263 TransientsMapExtras::Item &extra = extras->items[index];
264
265 const char *url = e.url();
266 const size_t urlLen = strlen(url);
267 Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
268 strncpy(extra.url, url, sizeof(extra.url));
269 extra.url[urlLen] = '\0';
270
271 extra.reqFlags = reqFlags;
272
273 Must(reqMethod != Http::METHOD_OTHER);
274 extra.reqMethod = reqMethod.id();
275
276 return true;
277 }
278
/// StoreMapCleaner API: called when the map frees a slice; nothing to do yet
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
284
285 void
286 Transients::abandon(const StoreEntry &e)
287 {
288 assert(e.mem_obj && map);
289 map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
290 CollapsedForwarding::Broadcast(e);
291 // We do not unlock the entry now because the problem is most likely with
292 // the server resource rather than a specific cache writer, so we want to
293 // prevent other readers from collapsing requests for that resource.
294 }
295
/// whether the in-transit entry was abandoned by its writer
bool
Transients::abandoned(const StoreEntry &e) const
{
    assert(e.mem_obj);
    return abandonedAt(e.mem_obj->xitTable.index);
}
302
303 /// whether an in-transit entry at the index is now abandoned by its writer
304 bool
305 Transients::abandonedAt(const sfileno index) const
306 {
307 assert(map);
308 return map->readableEntry(index).waitingToBeFreed;
309 }
310
311 void
312 Transients::completeWriting(const StoreEntry &e)
313 {
314 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
315 assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
316 // there will be no more updates from us after this, so we must prevent
317 // future readers from joining
318 map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
319 map->closeForWriting(e.mem_obj->xitTable.index);
320 e.mem_obj->xitTable.index = -1;
321 e.mem_obj->xitTable.io = MemObject::ioDone;
322 }
323 }
324
325 int
326 Transients::readers(const StoreEntry &e) const
327 {
328 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
329 assert(map);
330 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
331 }
332 return 0;
333 }
334
/// Store API: request entry removal; transients have nothing persistent
/// to schedule, so this is equivalent to unlink()
void
Transients::markForUnlink(StoreEntry &e)
{
    unlink(e);
}
340
341 void
342 Transients::unlink(StoreEntry &e)
343 {
344 if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
345 abandon(e);
346 }
347
348 void
349 Transients::disconnect(MemObject &mem_obj)
350 {
351 if (mem_obj.xitTable.index >= 0) {
352 assert(map);
353 if (mem_obj.xitTable.io == MemObject::ioWriting) {
354 map->abortWriting(mem_obj.xitTable.index);
355 } else {
356 assert(mem_obj.xitTable.io == MemObject::ioReading);
357 map->closeForReading(mem_obj.xitTable.index);
358 }
359 locals->at(mem_obj.xitTable.index) = NULL;
360 mem_obj.xitTable.index = -1;
361 mem_obj.xitTable.io = MemObject::ioDone;
362 }
363 }
364
365 /// calculates maximum number of entries we need to store and map
366 int64_t
367 Transients::EntryLimit()
368 {
369 // TODO: we should also check whether any SMP-aware caching is configured
370 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
371 return 0; // no SMP collapsed forwarding possible or needed
372
373 return 16*1024; // TODO: make configurable?
374 }
375
376 /// initializes shared memory segment used by Transients
377 class TransientsRr: public Ipc::Mem::RegisteredRunner
378 {
379 public:
380 /* RegisteredRunner API */
381 TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
382 virtual void useConfig();
383 virtual ~TransientsRr();
384
385 protected:
386 virtual void create();
387
388 private:
389 TransientsMap::Owner *mapOwner;
390 Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner;
391 };
392
/// registers TransientsRr so its create()/useConfig() hooks run at startup
RunnerRegistrationEntry(TransientsRr);
394
/// RegisteredRunner API: acts on the finalized configuration;
/// shared memory must already be configured at this point
void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
401
402 void
403 TransientsRr::create()
404 {
405 if (!Config.onoff.collapsed_forwarding)
406 return;
407
408 const int64_t entryLimit = Transients::EntryLimit();
409 if (entryLimit <= 0)
410 return; // no SMP configured or a misconfiguration
411
412 Must(!mapOwner);
413 mapOwner = TransientsMap::Init(MapLabel, entryLimit);
414 Must(!extrasOwner);
415 extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit);
416 }
417
TransientsRr::~TransientsRr()
{
    // release segment ownership objects created by create()
    delete extrasOwner;
    delete mapOwner;
}
423