]> git.ipfire.org Git - thirdparty/squid.git/blame - src/Transients.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / Transients.cc
CommitLineData
/*
 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Storage Manager */
9a9954ba
AR
11#include "squid.h"
12#include "base/RunnersRegistry.h"
e4d13993 13#include "CollapsedForwarding.h"
9a9954ba
AR
14#include "HttpReply.h"
15#include "ipc/mem/Page.h"
16#include "ipc/mem/Pages.h"
17#include "MemObject.h"
9a9954ba
AR
18#include "mime_header.h"
19#include "SquidConfig.h"
20#include "SquidMath.h"
21#include "StoreStats.h"
22#include "tools.h"
e4d13993 23#include "Transients.h"
9a9954ba 24
9a9954ba 25#include <limits>
9a9954ba 26
1860fbac
AR
27/// shared memory segment path to use for Transients map
28static const SBuf MapLabel("transients_map");
29/// shared memory segment path to use for Transients map extras
30static const char *ExtrasLabel = "transients_ex";
9a9954ba 31
// Starts with no shared map and no local entry table; both are created
// later by init() when SMP collapsed forwarding is actually enabled.
Transients::Transients(): map(NULL), locals(NULL)
{
}
35
Transients::~Transients()
{
    delete map;    // detaches from (but does not destroy) the shared segment
    delete locals; // the StoreEntry pointers inside are not owned by us
}
41
// Attaches to the shared transients map and extras segments (created by
// TransientsRr) and allocates the per-worker table of local StoreEntry
// pointers. A no-op unless SMP collapsed forwarding is configured.
void
Transients::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP support or a misconfiguration

    Must(!map);
    map = new TransientsMap(MapLabel);
    map->cleaner = this; // we get noteFreeMapSlice() callbacks

    // open the extras segment previously created by TransientsRr::create()
    extras = shm_old(TransientsMapExtras)(ExtrasLabel);

    // one slot per possible map entry, all initially nil
    locals = new Locals(entryLimit, 0);
}
57
58void
59Transients::getStats(StoreInfoStats &stats) const
60{
61#if TRANSIENT_STATS_SUPPORTED
62 const size_t pageSize = Ipc::Mem::PageSize();
63
64 stats.mem.shared = true;
65 stats.mem.capacity =
66 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
67 stats.mem.size =
68 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
69 stats.mem.count = currentCount();
70#endif
71}
72
// Appends a human-readable summary of transient-object usage to the given
// cache-manager report entry.
void
Transients::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    // NOTE(review): maxSize() is numeric_limits max, so "Maximum Size"
    // and the percentage below are effectively nominal values.
    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}
92
// Store API: periodic cleanup hook; transient entries are removed
// explicitly by their writers/readers, so nothing to do here.
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
98
// Store API: minimum store size; meaningless for in-transit entries.
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
104
// Store API: capacity in bytes; effectively unlimited for transients.
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
111
// Store API: bytes currently stored; not tracked for transients.
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
119
120uint64_t
121Transients::currentCount() const
122{
123 return map ? map->entryCount() : 0;
124}
125
126int64_t
127Transients::maxObjectSize() const
128{
129 // Squid currently does not limit the size of a transient object
130 return std::numeric_limits<uint64_t>::max();
131}
132
// Store API: usage-tracking hook for replacement policies.
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
138
// Store API: returns whether the entry should stay in the global
// store_table on our account.
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
145
// Store API: looks up an in-transit entry by its public key. On success,
// returns a locally-materialized StoreEntry and keeps the shared map slot
// read-locked so the caller keeps receiving writer updates.
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    // read-locks the slot on success; must be released unless we return newE
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index);
    return NULL;
}
172
// Recreates a local StoreEntry from the shared extras slot at the given map
// index and registers it in locals[]. Called by get() for entries written by
// another worker; the caller holds (and keeps) the read lock on the slot.
StoreEntry *
Transients::copyFromShm(const sfileno index)
{
    const TransientsMapExtras::Item &extra = extras->items[index];

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
                                         extra.reqFlags, extra.reqMethod);

    assert(e->mem_obj);
    e->mem_obj->method = extra.reqMethod;
    e->mem_obj->xitTable.io = MemObject::ioReading; // we consume, not produce
    e->mem_obj->xitTable.index = index;

    e->setPublicKey();
    assert(e->key);

    // How do we know its SMP- and not just locally-collapsed? A worker gets
    // locally-collapsed entries from the local store_table, not Transients.
    // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
    e->mem_obj->smpCollapsed = true;

    assert(!locals->at(index));
    // We do not lock e because we do not want to prevent its destruction;
    // e is tied to us via mem_obj so we will know when it is destructed.
    locals->at(index) = e;
    return e;
}
201
// Returns the local StoreEntry previously registered (via copyFromShm or
// startWriting bookkeeping in locals[]) for the given map index, or nil.
StoreEntry *
Transients::findCollapsed(const sfileno index)
{
    if (!map)
        return NULL;

    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
        // the registered entry must still be tied to the same map slot
        assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
        return oldE;
    }

    debugs(20, 3, "no entry at " << index << " in " << MapLabel);
    return NULL;
}
217
// Registers entry e as being written by this worker: acquires a write lock
// on a shared map slot, copies the request details to shared memory, and
// marks the entry as appending. On any failure, the write lock is released
// (abortWriting) and e is left unregistered.
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0); // not already registered

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return;
    }

    sfileno index = 0;
    // write-locks the slot on success
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return;
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index); // lets readers open the entry
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortWriting(index); // releases the write lock taken above
}
256
9a9954ba
AR
257/// copies all relevant local data to shared memory
258bool
259Transients::copyToShm(const StoreEntry &e, const sfileno index,
260 const RequestFlags &reqFlags,
261 const HttpRequestMethod &reqMethod)
262{
1860fbac 263 TransientsMapExtras::Item &extra = extras->items[index];
9a9954ba
AR
264
265 const char *url = e.url();
266 const size_t urlLen = strlen(url);
1860fbac
AR
267 Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
268 strncpy(extra.url, url, sizeof(extra.url));
269 extra.url[urlLen] = '\0';
9a9954ba 270
1860fbac 271 extra.reqFlags = reqFlags;
9a9954ba
AR
272
273 Must(reqMethod != Http::METHOD_OTHER);
1860fbac 274 extra.reqMethod = reqMethod.id();
9a9954ba
AR
275
276 return true;
277}
278
// StoreMapCleaner API: called by the map when it frees a slice.
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
284
// Marks the entry as abandoned by its writer (so readers can detect it via
// abandoned()) and broadcasts the change to other workers.
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e);
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
295
296bool
297Transients::abandoned(const StoreEntry &e) const
298{
299 assert(e.mem_obj);
300 return abandonedAt(e.mem_obj->xitTable.index);
301}
302
303/// whether an in-transit entry at the index is now abandoned by its writer
304bool
305Transients::abandonedAt(const sfileno index) const
306{
307 assert(map);
308 return map->readableEntry(index).waitingToBeFreed;
309}
310
// Ends this worker's writing phase for e: marks the shared slot so no new
// readers join, releases the write lock, and detaches e from our map.
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1;
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}
324
d366a7fa
AR
325int
326Transients::readers(const StoreEntry &e) const
327{
328 if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
329 assert(map);
330 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
331 }
332 return 0;
333}
334
// Store API: request entry removal; for transients this is the same as
// unlink() because we never persist anything.
void
Transients::markForUnlink(StoreEntry &e)
{
    unlink(e);
}
340
341void
342Transients::unlink(StoreEntry &e)
1bfe9ade
AR
343{
344 if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
345 abandon(e);
346}
347
// Severs the link between a dying MemObject and our shared map: releases
// the corresponding read or write lock, clears the locals[] slot, and
// resets the MemObject's transient-table bookkeeping.
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map);
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL;
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}
364
9a9954ba
AR
365/// calculates maximum number of entries we need to store and map
366int64_t
367Transients::EntryLimit()
368{
369 // TODO: we should also check whether any SMP-aware caching is configured
370 if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
371 return 0; // no SMP collapsed forwarding possible or needed
372
e6d2c263 373 return 16*1024; // TODO: make configurable?
9a9954ba
AR
374}
375
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner;    ///< owns the shared transients map segment
    Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner; ///< owns the extras segment
};
392
// register so useConfig()/create() run during startup/reconfiguration
RunnerRegistrationEntry(TransientsRr);
9a9954ba 394
// RegisteredRunner API: applies parsed configuration; the base class
// implementation triggers create() where appropriate.
void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
401
// RegisteredRunner API: creates the shared map and extras segments that
// worker processes later attach to in Transients::init().
void
TransientsRr::create()
{
    if (!Config.onoff.collapsed_forwarding)
        return;

    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner);
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit);
}
417
TransientsRr::~TransientsRr()
{
    // destroying the owners removes the underlying shared segments
    delete extrasOwner;
    delete mapOwner;
}
f53969cc 423