/*
 * DEBUG: section 20    Storage Manager
 */
7 #include "base/RunnersRegistry.h"
8 #include "CollapsedForwarding.h"
10 #include "ipc/mem/Page.h"
11 #include "ipc/mem/Pages.h"
12 #include "MemObject.h"
13 #include "mime_header.h"
14 #include "SquidConfig.h"
15 #include "SquidMath.h"
16 #include "StoreStats.h"
18 #include "Transients.h"
22 /// shared memory segment path to use for Transients map
23 static const SBuf
MapLabel("transients_map");
24 /// shared memory segment path to use for Transients map extras
25 static const char *ExtrasLabel
= "transients_ex";
27 Transients::Transients(): map(NULL
), locals(NULL
)
31 Transients::~Transients()
40 const int64_t entryLimit
= EntryLimit();
42 return; // no SMP support or a misconfiguration
45 map
= new TransientsMap(MapLabel
);
48 extras
= shm_old(TransientsMapExtras
)(ExtrasLabel
);
50 locals
= new Locals(entryLimit
, 0);
54 Transients::getStats(StoreInfoStats
&stats
) const
56 #if TRANSIENT_STATS_SUPPORTED
57 const size_t pageSize
= Ipc::Mem::PageSize();
59 stats
.mem
.shared
= true;
61 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
63 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
64 stats
.mem
.count
= currentCount();
69 Transients::stat(StoreEntry
&e
) const
71 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
73 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
74 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
75 currentSize() / 1024.0,
76 Math::doublePercent(currentSize(), maxSize()));
79 const int limit
= map
->entryLimit();
80 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
82 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
83 currentCount(), (100.0 * currentCount() / limit
));
89 Transients::maintain()
91 // no lazy garbage collection needed
95 Transients::minSize() const
97 return 0; // XXX: irrelevant, but Store parent forces us to implement this
101 Transients::maxSize() const
103 // Squid currently does not limit the total size of all transient objects
104 return std::numeric_limits
<uint64_t>::max();
108 Transients::currentSize() const
110 // TODO: we do not get enough information to calculate this
111 // StoreEntry should update associated stores when its size changes
116 Transients::currentCount() const
118 return map
? map
->entryCount() : 0;
122 Transients::maxObjectSize() const
124 // Squid currently does not limit the size of a transient object
125 return std::numeric_limits
<uint64_t>::max();
129 Transients::reference(StoreEntry
&)
131 // no replacement policy (but the cache(s) storing the entry may have one)
135 Transients::dereference(StoreEntry
&, bool)
137 // no need to keep e in the global store_table for us; we have our own map
142 Transients::callback()
148 Transients::search(String
const, HttpRequest
*)
150 fatal("not implemented");
155 Transients::get(const cache_key
*key
)
161 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
165 // If we already have a local entry, the store_table should have found it.
166 // Since it did not, the local entry key must have changed from public to
167 // private. We still need to keep the private entry around for syncing as
168 // its clients depend on it, but we should not allow new clients to join.
169 if (StoreEntry
*oldE
= locals
->at(index
)) {
170 debugs(20, 3, "not joining private " << *oldE
);
171 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
172 } else if (StoreEntry
*newE
= copyFromShm(index
)) {
173 return newE
; // keep read lock to receive updates from others
176 // private entry or loading failure
177 map
->closeForReading(index
);
182 Transients::copyFromShm(const sfileno index
)
184 const TransientsMapExtras::Item
&extra
= extras
->items
[index
];
186 // create a brand new store entry and initialize it with stored info
187 StoreEntry
*e
= storeCreatePureEntry(extra
.url
, extra
.url
,
188 extra
.reqFlags
, extra
.reqMethod
);
191 e
->mem_obj
->method
= extra
.reqMethod
;
192 e
->mem_obj
->xitTable
.io
= MemObject::ioReading
;
193 e
->mem_obj
->xitTable
.index
= index
;
198 // How do we know its SMP- and not just locally-collapsed? A worker gets
199 // locally-collapsed entries from the local store_table, not Transients.
200 // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
201 e
->mem_obj
->smpCollapsed
= true;
203 assert(!locals
->at(index
));
204 // We do not lock e because we do not want to prevent its destruction;
205 // e is tied to us via mem_obj so we will know when it is destructed.
206 locals
->at(index
) = e
;
211 Transients::get(String
const key
, STOREGETCLIENT aCallback
, void *aCallbackData
)
213 // XXX: not needed but Store parent forces us to implement this
214 fatal("Transients::get(key,callback,data) should not be called");
218 Transients::findCollapsed(const sfileno index
)
223 if (StoreEntry
*oldE
= locals
->at(index
)) {
224 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
225 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
229 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
234 Transients::startWriting(StoreEntry
*e
, const RequestFlags
&reqFlags
,
235 const HttpRequestMethod
&reqMethod
)
239 assert(e
->mem_obj
->xitTable
.index
< 0);
242 debugs(20, 5, "No map to add " << *e
);
247 Ipc::StoreMapAnchor
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
->key
), index
);
249 debugs(20, 5, "collision registering " << *e
);
254 if (copyToShm(*e
, index
, reqFlags
, reqMethod
)) {
256 e
->mem_obj
->xitTable
.io
= MemObject::ioWriting
;
257 e
->mem_obj
->xitTable
.index
= index
;
258 map
->startAppending(index
);
259 // keep write lock -- we will be supplying others with updates
262 // fall through to the error handling code
263 } catch (const std::exception
&x
) { // TODO: should we catch ... as well?
264 debugs(20, 2, "error keeping entry " << index
<<
265 ' ' << *e
<< ": " << x
.what());
266 // fall through to the error handling code
269 map
->abortWriting(index
);
272 /// copies all relevant local data to shared memory
274 Transients::copyToShm(const StoreEntry
&e
, const sfileno index
,
275 const RequestFlags
&reqFlags
,
276 const HttpRequestMethod
&reqMethod
)
278 TransientsMapExtras::Item
&extra
= extras
->items
[index
];
280 const char *url
= e
.url();
281 const size_t urlLen
= strlen(url
);
282 Must(urlLen
< sizeof(extra
.url
)); // we have space to store it all, plus 0
283 strncpy(extra
.url
, url
, sizeof(extra
.url
));
284 extra
.url
[urlLen
] = '\0';
286 extra
.reqFlags
= reqFlags
;
288 Must(reqMethod
!= Http::METHOD_OTHER
);
289 extra
.reqMethod
= reqMethod
.id();
295 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId
)
297 // TODO: we should probably find the entry being deleted and abort it
301 Transients::abandon(const StoreEntry
&e
)
303 assert(e
.mem_obj
&& map
);
304 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
305 CollapsedForwarding::Broadcast(e
);
306 // We do not unlock the entry now because the problem is most likely with
307 // the server resource rather than a specific cache writer, so we want to
308 // prevent other readers from collapsing requests for that resource.
312 Transients::abandoned(const StoreEntry
&e
) const
315 return abandonedAt(e
.mem_obj
->xitTable
.index
);
318 /// whether an in-transit entry at the index is now abandoned by its writer
320 Transients::abandonedAt(const sfileno index
) const
323 return map
->readableEntry(index
).waitingToBeFreed
;
327 Transients::completeWriting(const StoreEntry
&e
)
329 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
330 assert(e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
);
331 // there will be no more updates from us after this, so we must prevent
332 // future readers from joining
333 map
->freeEntry(e
.mem_obj
->xitTable
.index
); // just marks the locked entry
334 map
->closeForWriting(e
.mem_obj
->xitTable
.index
);
335 e
.mem_obj
->xitTable
.index
= -1;
336 e
.mem_obj
->xitTable
.io
= MemObject::ioDone
;
341 Transients::readers(const StoreEntry
&e
) const
343 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.index
>= 0) {
345 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
351 Transients::markForUnlink(StoreEntry
&e
)
353 if (e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== MemObject::ioWriting
)
358 Transients::disconnect(MemObject
&mem_obj
)
360 if (mem_obj
.xitTable
.index
>= 0) {
362 if (mem_obj
.xitTable
.io
== MemObject::ioWriting
) {
363 map
->abortWriting(mem_obj
.xitTable
.index
);
365 assert(mem_obj
.xitTable
.io
== MemObject::ioReading
);
366 map
->closeForReading(mem_obj
.xitTable
.index
);
368 locals
->at(mem_obj
.xitTable
.index
) = NULL
;
369 mem_obj
.xitTable
.index
= -1;
370 mem_obj
.xitTable
.io
= MemObject::ioDone
;
374 /// calculates maximum number of entries we need to store and map
376 Transients::EntryLimit()
378 // TODO: we should also check whether any SMP-aware caching is configured
379 if (!UsingSmp() || !Config
.onoff
.collapsed_forwarding
)
380 return 0; // no SMP collapsed forwarding possible or needed
382 return 16*1024; // TODO: make configurable?
385 /// initializes shared memory segment used by Transients
386 class TransientsRr
: public Ipc::Mem::RegisteredRunner
389 /* RegisteredRunner API */
390 TransientsRr(): mapOwner(NULL
), extrasOwner(NULL
) {}
391 virtual void useConfig();
392 virtual ~TransientsRr();
395 virtual void create();
398 TransientsMap::Owner
*mapOwner
;
399 Ipc::Mem::Owner
<TransientsMapExtras
> *extrasOwner
;
402 RunnerRegistrationEntry(TransientsRr
);
405 TransientsRr::useConfig()
407 assert(Config
.memShared
.configured());
408 Ipc::Mem::RegisteredRunner::useConfig();
412 TransientsRr::create()
414 if (!Config
.onoff
.collapsed_forwarding
)
417 const int64_t entryLimit
= Transients::EntryLimit();
419 return; // no SMP configured or a misconfiguration
422 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
424 extrasOwner
= shm_new(TransientsMapExtras
)(ExtrasLabel
, entryLimit
);
427 TransientsRr::~TransientsRr()