/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>

/// shared memory segment path to use for Transients map
static const SBuf MapLabel("transients_map");
/// shared memory segment path to use for Transients map extras
static const char *ExtrasLabel = "transients_ex";

Transients::Transients(): map(NULL), locals(NULL)
{
}

Transients::~Transients()
{
    delete map;
    delete locals;
}

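/// opens the shared Transients map and extras segments created by TransientsRr
/// (a no-op without SMP collapsed forwarding or with a misconfigured limit)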
void
Transients::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP support or a misconfiguration

    Must(!map);
    map = new TransientsMap(MapLabel);
    map->cleaner = this;

    extras = shm_old(TransientsMapExtras)(ExtrasLabel);

    locals = new Locals(entryLimit, 0);
}

void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#endif
}

void
Transients::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}

void
Transients::maintain()
{
    // no lazy garbage collection needed
}

uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}

uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    //       StoreEntry should update associated stores when its size changes
    return 0;
}

uint64_t
Transients::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
Transients::maxObjectSize() const
{
    // Squid currently does not limit the size of a transient object,
    // so return the largest value representable by our return type
    return std::numeric_limits<int64_t>::max();
}

void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}

bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

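/// Store API: if the given key names an in-transit entry, returns a local
/// StoreEntry synced with it; returns nil for private entries and failures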
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index);
    return NULL;
}

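/// recreates a local StoreEntry from the shared metadata (extras) stored at
/// the given map index and registers it in our locals table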
StoreEntry *
Transients::copyFromShm(const sfileno index)
{
    const TransientsMapExtras::Item &extra = extras->items[index];

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
                                         extra.reqFlags, extra.reqMethod);

    assert(e->mem_obj);
    e->mem_obj->method = extra.reqMethod;
    e->mem_obj->xitTable.io = MemObject::ioReading;
    e->mem_obj->xitTable.index = index;

    // TODO: Support collapsed revalidation for SMP-aware caches.
    e->setPublicKey(ksDefault);
    assert(e->key);

    // How do we know it's SMP- and not just locally-collapsed? A worker gets
    // locally-collapsed entries from the local store_table, not Transients.
    // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
    e->mem_obj->smpCollapsed = true;

    assert(!locals->at(index));
    // We do not lock e because we do not want to prevent its destruction;
    // e is tied to us via mem_obj so we will know when it is destructed.
    locals->at(index) = e;
    return e;
}

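/// returns the local StoreEntry that we previously collapsed on the shared
/// entry at the given index, or nil if this worker has not joined that entry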
StoreEntry *
Transients::findCollapsed(const sfileno index)
{
    if (!map)
        return NULL;

    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
        assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
        return oldE;
    }

    debugs(20, 3, "no entry at " << index << " in " << MapLabel);
    return NULL;
}

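/// registers the entry with the shared map; on success, keeps the write lock
/// so that this worker can supply updates to collapsed readers elsewhere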
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0);

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return;
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index);
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortWriting(index);
}

/// copies all relevant local data to shared memory
bool
Transients::copyToShm(const StoreEntry &e, const sfileno index,
                      const RequestFlags &reqFlags,
                      const HttpRequestMethod &reqMethod)
{
    TransientsMapExtras::Item &extra = extras->items[index];

    const char *url = e.url();
    const size_t urlLen = strlen(url);
    Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
    strncpy(extra.url, url, sizeof(extra.url));
    extra.url[urlLen] = '\0';

    extra.reqFlags = reqFlags;

    Must(reqMethod != Http::METHOD_OTHER);
    extra.reqMethod = reqMethod.id();

    return true;
}

void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}

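/// marks the shared entry for eventual deletion, barring new readers, and
/// broadcasts the change; the writer keeps its lock, as explained below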
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e);
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}

bool
Transients::abandoned(const StoreEntry &e) const
{
    assert(e.mem_obj);
    return abandonedAt(e.mem_obj->xitTable.index);
}

/// whether an in-transit entry at the index is now abandoned by its writer
bool
Transients::abandonedAt(const sfileno index) const
{
    assert(map);
    return map->readableEntry(index).waitingToBeFreed;
}

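/// ends this worker's writing: bars future readers from joining and releases
/// the write lock while keeping the local entry itself intact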
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1;
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}

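/// the number of read locks currently held on the entry's shared map anchor
/// (zero for entries we are not tracking)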
int
Transients::readers(const StoreEntry &e) const
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(map);
        return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
    }
    return 0;
}

void
Transients::markForUnlink(StoreEntry &e)
{
    unlink(e);
}

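/// abandons the entry if we are still writing it; a no-op otherwise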
void
Transients::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
        abandon(e);
}

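/// breaks the MemObject-to-transients association, releasing our read or
/// write lock on the shared entry and forgetting the local entry pointer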
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map);
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL;
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
Transients::EntryLimit()
{
    // TODO: we should also check whether any SMP-aware caching is configured
    if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
        return 0; // no SMP collapsed forwarding possible or needed

    return Config.collapsed_forwarding_shared_entries_limit;
}

/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner;
    Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner;
};

RunnerRegistrationEntry(TransientsRr);

void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

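/// creates the shared memory segments (map and extras) that workers later
/// attach to in Transients::init()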
void
TransientsRr::create()
{
    if (!Config.onoff.collapsed_forwarding)
        return;

    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner);
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit);
}

TransientsRr::~TransientsRr()
{
    delete extrasOwner;
    delete mapOwner;
}