/*
 * DEBUG: section 20    Storage Manager
 */
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "Transients.h"

#include <cstring>
#include <limits>
/// shared memory segment path to use for Transients maps
static const char *MapLabel = "transients_map";
27 Transients::Transients(): map(NULL
), locals(NULL
)
31 Transients::~Transients()
40 const int64_t entryLimit
= EntryLimit();
42 return; // no SMP support or a misconfiguration
45 map
= new TransientsMap(MapLabel
);
48 locals
= new Locals(entryLimit
, 0);
52 Transients::getStats(StoreInfoStats
&stats
) const
54 #if TRANSIENT_STATS_SUPPORTED
55 const size_t pageSize
= Ipc::Mem::PageSize();
57 stats
.mem
.shared
= true;
59 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
61 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
62 stats
.mem
.count
= currentCount();
67 Transients::stat(StoreEntry
&e
) const
69 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
71 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
72 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
73 currentSize() / 1024.0,
74 Math::doublePercent(currentSize(), maxSize()));
77 const int limit
= map
->entryLimit();
78 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
80 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
81 currentCount(), (100.0 * currentCount() / limit
));
87 Transients::maintain()
89 // no lazy garbage collection needed
93 Transients::minSize() const
95 return 0; // XXX: irrelevant, but Store parent forces us to implement this
99 Transients::maxSize() const
101 // Squid currently does not limit the total size of all transient objects
102 return std::numeric_limits
<uint64_t>::max();
106 Transients::currentSize() const
108 // TODO: we do not get enough information to calculate this
109 // StoreEntry should update associated stores when its size changes
114 Transients::currentCount() const
116 return map
? map
->entryCount() : 0;
120 Transients::maxObjectSize() const
122 // Squid currently does not limit the size of a transient object
123 return std::numeric_limits
<uint64_t>::max();
127 Transients::reference(StoreEntry
&)
129 // no replacement policy (but the cache(s) storing the entry may have one)
133 Transients::dereference(StoreEntry
&, bool)
135 // no need to keep e in the global store_table for us; we have our own map
140 Transients::callback()
146 Transients::search(String
const, HttpRequest
*)
148 fatal("not implemented");
153 Transients::get(const cache_key
*key
)
159 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
163 // If we already have a local entry, the store_table should have found it.
164 // Since it did not, the local entry key must have changed from public to
165 // private. We still need to keep the private entry around for syncing as
166 // its clients depend on it, but we should not allow new clients to join.
167 if (StoreEntry
*oldE
= locals
->at(index
)) {
168 debugs(20, 3, "not joining private " << *oldE
);
169 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
170 } else if (StoreEntry
*newE
= copyFromShm(index
)) {
171 return newE
; // keep read lock to receive updates from others
174 // private entry or loading failure
175 map
->closeForReading(index
);
180 Transients::copyFromShm(const sfileno index
)
182 const TransientsMap::Extras
&extras
= map
->extras(index
);
184 // create a brand new store entry and initialize it with stored info
185 StoreEntry
*e
= storeCreatePureEntry(extras
.url
, extras
.url
,
186 extras
.reqFlags
, extras
.reqMethod
);
189 e
->mem_obj
->method
= extras
.reqMethod
;
190 e
->mem_obj
->xitTable
.io
= MemObject::ioReading
;
191 e
->mem_obj
->xitTable
.index
= index
;
196 // How do we know its SMP- and not just locally-collapsed? A worker gets
197 // locally-collapsed entries from the local store_table, not Transients.
198 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
199 e
->mem_obj
->smpCollapsed
= true;
201 assert(!locals
->at(index
));
202 // We do not lock e because we do not want to prevent its destruction;
203 // e is tied to us via mem_obj so we will know when it is destructed.
204 locals
->at(index
) = e
;
209 Transients::get(String
const key
, STOREGETCLIENT aCallback
, void *aCallbackData
)
211 // XXX: not needed but Store parent forces us to implement this
212 fatal("Transients::get(key,callback,data) should not be called");
216 Transients::findCollapsed(const sfileno index
)
221 if (StoreEntry
*oldE
= locals
->at(index
)) {
222 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
223 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
227 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
232 Transients::startWriting(StoreEntry
*e
, const RequestFlags
&reqFlags
,
233 const HttpRequestMethod
&reqMethod
)
237 assert(e
->mem_obj
->xitTable
.index
< 0);
240 debugs(20, 5, "No map to add " << *e
);
245 Ipc::StoreMapAnchor
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
->key
), index
);
247 debugs(20, 5, "collision registering " << *e
);
252 if (copyToShm(*e
, index
, reqFlags
, reqMethod
)) {
254 e
->mem_obj
->xitTable
.io
= MemObject::ioWriting
;
255 e
->mem_obj
->xitTable
.index
= index
;
256 map
->startAppending(index
);
257 // keep write lock -- we will be supplying others with updates
260 // fall through to the error handling code
262 catch (const std::exception
&x
) { // TODO: should we catch ... as well?
263 debugs(20, 2, "error keeping entry " << index
<<
264 ' ' << *e
<< ": " << x
.what());
265 // fall through to the error handling code
268 map
->abortWriting(index
);
271 /// copies all relevant local data to shared memory
273 Transients::copyToShm(const StoreEntry
&e
, const sfileno index
,
274 const RequestFlags
&reqFlags
,
275 const HttpRequestMethod
&reqMethod
)
277 TransientsMap::Extras
&extras
= map
->extras(index
);
279 const char *url
= e
.url();
280 const size_t urlLen
= strlen(url
);
281 Must(urlLen
< sizeof(extras
.url
)); // we have space to store it all, plus 0
282 strncpy(extras
.url
, url
, sizeof(extras
.url
));
283 extras
.url
[urlLen
] = '\0';
285 extras
.reqFlags
= reqFlags
;
287 Must(reqMethod
!= Http::METHOD_OTHER
);
288 extras
.reqMethod
= reqMethod
.id();
294 Transients::noteFreeMapSlice(const sfileno sliceId
)
296 // TODO: we should probably find the entry being deleted and abort it
300 Transients::abandon(const StoreEntry
&e
)
302 assert(e
.mem_obj
&& map
);
303 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
304 CollapsedForwarding::Broadcast(e
);
305 // We do not unlock the entry now because the problem is most likely with
306 // the server resource rather than a specific cache writer, so we want to
307 // prevent other readers from collapsing requests for that resource.
311 Transients::abandoned(const StoreEntry
&e
) const
314 return abandonedAt(e
.mem_obj
->xitTable
.index
);
317 /// whether an in-transit entry at the index is now abandoned by its writer
319 Transients::abandonedAt(const sfileno index
) const
322 return map
->readableEntry(index
).waitingToBeFreed
;
326 Transients::completeWriting(const StoreEntry
&e
)
328 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
329 assert(e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
);
330 // there will be no more updates from us after this, so we must prevent
331 // future readers from joining
332 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
333 map
->closeForWriting(e
.mem_obj
->xitTable
.index
);
334 e
.mem_obj
->xitTable
.index
= -1;
335 e
.mem_obj
->xitTable
.io
= MemObject::ioDone
;
340 Transients::readers(const StoreEntry
&e
) const
342 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
344 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
350 Transients::markForUnlink(StoreEntry
&e
)
352 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
)
357 Transients::disconnect(MemObject
&mem_obj
)
359 if (mem_obj
.xitTable
.index
>= 0) {
361 if (mem_obj
.xitTable
.io
== MemObject::ioWriting
) {
362 map
->abortWriting(mem_obj
.xitTable
.index
);
364 assert(mem_obj
.xitTable
.io
== MemObject::ioReading
);
365 map
->closeForReading(mem_obj
.xitTable
.index
);
367 locals
->at(mem_obj
.xitTable
.index
) = NULL
;
368 mem_obj
.xitTable
.index
= -1;
369 mem_obj
.xitTable
.io
= MemObject::ioDone
;
373 /// calculates maximum number of entries we need to store and map
375 Transients::EntryLimit()
377 // TODO: we should also check whether any SMP-aware caching is configured
378 if (!UsingSmp() || !Config
.onoff
.collapsed_forwarding
)
379 return 0; // no SMP collapsed forwarding possible or needed
381 return 16*1024; // TODO: make configurable?
384 /// initializes shared memory segment used by Transients
385 class TransientsRr
: public Ipc::Mem::RegisteredRunner
388 /* RegisteredRunner API */
389 TransientsRr(): mapOwner(NULL
) {}
390 virtual void run(const RunnerRegistry
&);
391 virtual ~TransientsRr();
394 virtual void create(const RunnerRegistry
&);
397 TransientsMap::Owner
*mapOwner
;
400 RunnerRegistrationEntry(rrAfterConfig
, TransientsRr
);
403 TransientsRr::run(const RunnerRegistry
&r
)
405 assert(Config
.memShared
.configured());
406 Ipc::Mem::RegisteredRunner::run(r
);
410 TransientsRr::create(const RunnerRegistry
&)
412 if (!Config
.onoff
.collapsed_forwarding
)
415 const int64_t entryLimit
= Transients::EntryLimit();
417 return; // no SMP configured or a misconfiguration
420 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
423 TransientsRr::~TransientsRr()