/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
30 Transients::Transients(): map(NULL
), locals(NULL
)
34 Transients::~Transients()
44 const int64_t entryLimit
= EntryLimit();
45 assert(entryLimit
> 0);
48 map
= new TransientsMap(MapLabel
);
50 map
->disableHitValidation(); // Transients lacks slices to validate
52 locals
= new Locals(entryLimit
, 0);
56 Transients::getStats(StoreInfoStats
&stats
) const
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
71 Transients::stat(StoreEntry
&e
) const
73 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
75 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
76 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
77 currentSize() / 1024.0,
78 Math::doublePercent(currentSize(), maxSize()));
81 const int limit
= map
->entryLimit();
82 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
84 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
85 currentCount(), (100.0 * currentCount() / limit
));
91 Transients::maintain()
93 // no lazy garbage collection needed
97 Transients::minSize() const
99 return 0; // XXX: irrelevant, but Store parent forces us to implement this
103 Transients::maxSize() const
105 // Squid currently does not limit the total size of all transient objects
106 return std::numeric_limits
<uint64_t>::max();
110 Transients::currentSize() const
112 // TODO: we do not get enough information to calculate this
113 // StoreEntry should update associated stores when its size changes
118 Transients::currentCount() const
120 return map
? map
->entryCount() : 0;
124 Transients::maxObjectSize() const
126 // Squid currently does not limit the size of a transient object
127 return std::numeric_limits
<uint64_t>::max();
131 Transients::reference(StoreEntry
&)
133 // no replacement policy (but the cache(s) storing the entry may have one)
137 Transients::dereference(StoreEntry
&)
139 // no need to keep e in the global store_table for us; we have our own map
144 Transients::get(const cache_key
*key
)
150 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
154 // If we already have a local entry, the store_table should have found it.
155 // Since it did not, the local entry key must have changed from public to
156 // private. We still need to keep the private entry around for syncing as
157 // its clients depend on it, but we should not allow new clients to join.
158 if (StoreEntry
*oldE
= locals
->at(index
)) {
159 debugs(20, 3, "not joining private " << *oldE
);
160 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
161 map
->closeForReadingAndFreeIdle(index
);
165 // store hadWriter before checking ENTRY_REQUIRES_COLLAPSING to avoid racing
166 // the writer that clears that flag and then leaves
167 const auto hadWriter
= map
->peekAtWriter(index
);
168 if (!hadWriter
&& EBIT_TEST(anchor
->basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
169 debugs(20, 3, "not joining abandoned entry " << index
);
170 map
->closeForReadingAndFreeIdle(index
);
174 StoreEntry
*e
= new StoreEntry();
175 e
->createMemObject();
176 anchorEntry(*e
, index
, *anchor
);
178 // keep read lock to receive updates from others
183 Transients::findCollapsed(const sfileno index
)
188 if (StoreEntry
*oldE
= locals
->at(index
)) {
189 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
190 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
194 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
199 Transients::clearCollapsingRequirement(const StoreEntry
&e
)
202 assert(e
.hasTransients());
204 const auto idx
= e
.mem_obj
->xitTable
.index
;
205 auto &anchor
= map
->writeableEntry(idx
);
206 if (EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
207 EBIT_CLR(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
208 CollapsedForwarding::Broadcast(e
);
213 Transients::monitorIo(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
215 if (!e
->hasTransients()) {
216 addEntry(e
, key
, direction
);
217 assert(e
->hasTransients());
220 const auto index
= e
->mem_obj
->xitTable
.index
;
221 if (const auto old
= locals
->at(index
)) {
224 // We do not lock e because we do not want to prevent its destruction;
225 // e is tied to us via mem_obj so we will know when it is destructed.
226 locals
->at(index
) = e
;
230 /// creates a new Transients entry
232 Transients::addEntry(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
236 assert(!e
->hasTransients());
238 Must(map
); // configured to track transients
240 if (direction
== Store::ioWriting
)
241 return addWriterEntry(*e
, key
);
243 assert(direction
== Store::ioReading
);
244 addReaderEntry(*e
, key
);
247 /// addEntry() helper used for cache entry creators/writers
249 Transients::addWriterEntry(StoreEntry
&e
, const cache_key
*key
)
252 const auto anchor
= map
->openForWriting(key
, index
);
254 throw TextException("writer collision", Here());
256 // set ASAP in hope to unlock the slot if something throws
257 // and to provide index to such methods as hasWriter()
258 auto &xitTable
= e
.mem_obj
->xitTable
;
259 xitTable
.index
= index
;
260 xitTable
.io
= Store::ioWriting
;
263 // allow reading and receive remote DELETE events, but do not switch to
264 // the reading lock because transientReaders() callers want true readers
265 map
->startAppending(index
);
268 /// addEntry() helper used for cache readers
269 /// readers do not modify the cache, but they must create a Transients entry
271 Transients::addReaderEntry(StoreEntry
&e
, const cache_key
*key
)
274 const auto anchor
= map
->openOrCreateForReading(key
, index
, e
);
276 throw TextException("reader collision", Here());
278 anchorEntry(e
, index
, *anchor
);
279 // keep the entry locked (for reading) to receive remote DELETE events
282 /// fills (recently created) StoreEntry with information currently in Transients
284 Transients::anchorEntry(StoreEntry
&e
, const sfileno index
, const Ipc::StoreMapAnchor
&anchor
)
286 // set ASAP in hope to unlock the slot if something throws
287 // and to provide index to such methods as hasWriter()
288 auto &xitTable
= e
.mem_obj
->xitTable
;
289 xitTable
.index
= index
;
290 xitTable
.io
= Store::ioReading
;
292 const auto hadWriter
= hasWriter(e
); // before computing collapsingRequired
293 anchor
.exportInto(e
);
294 const bool collapsingRequired
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
295 assert(!collapsingRequired
|| hadWriter
);
296 e
.setCollapsingRequirement(collapsingRequired
);
300 Transients::hasWriter(const StoreEntry
&e
)
302 if (!e
.hasTransients())
304 return map
->peekAtWriter(e
.mem_obj
->xitTable
.index
);
308 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
310 // TODO: we should probably find the entry being deleted and abort it
314 Transients::status(const StoreEntry
&entry
, Transients::EntryStatus
&entryStatus
) const
317 assert(entry
.hasTransients());
318 const auto idx
= entry
.mem_obj
->xitTable
.index
;
319 const auto &anchor
= isWriter(entry
) ?
320 map
->writeableEntry(idx
) : map
->readableEntry(idx
);
321 entryStatus
.abortedByWriter
= anchor
.writerHalted
;
322 entryStatus
.waitingToBeFreed
= anchor
.waitingToBeFreed
;
323 entryStatus
.collapsed
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
327 Transients::completeWriting(const StoreEntry
&e
)
329 assert(e
.hasTransients());
331 map
->switchWritingToReading(e
.mem_obj
->xitTable
.index
);
332 e
.mem_obj
->xitTable
.io
= Store::ioReading
;
336 Transients::readers(const StoreEntry
&e
) const
338 if (e
.hasTransients()) {
340 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
346 Transients::evictCached(StoreEntry
&e
)
349 if (e
.hasTransients()) {
350 const auto index
= e
.mem_obj
->xitTable
.index
;
351 if (map
->freeEntry(index
)) {
352 // Delay syncCollapsed(index) which may end `e` wait for updates.
353 // Calling it directly/here creates complex reentrant call chains.
354 CollapsedForwarding::Broadcast(e
, true);
356 } // else nothing to do because e must be private
360 Transients::evictIfFound(const cache_key
*key
)
365 const sfileno index
= map
->fileNoByKey(key
);
366 if (map
->freeEntry(index
))
367 CollapsedForwarding::Broadcast(index
, true);
371 Transients::disconnect(StoreEntry
&entry
)
373 debugs(20, 5, entry
);
374 if (entry
.hasTransients()) {
375 auto &xitTable
= entry
.mem_obj
->xitTable
;
377 if (isWriter(entry
)) {
378 map
->abortWriting(xitTable
.index
);
380 assert(isReader(entry
));
381 map
->closeForReadingAndFreeIdle(xitTable
.index
);
383 locals
->at(xitTable
.index
) = nullptr;
385 xitTable
.io
= Store::ioDone
;
389 /// calculates maximum number of entries we need to store and map
391 Transients::EntryLimit()
393 return (UsingSmp() && Store::Controller::SmpAware()) ?
394 Config
.shared_transient_entries_limit
: 0;
398 Transients::markedForDeletion(const cache_key
*key
) const
401 return map
->markedForDeletion(key
);
405 Transients::isReader(const StoreEntry
&e
) const
407 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioReading
;
411 Transients::isWriter(const StoreEntry
&e
) const
413 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioWriting
;
416 /// initializes shared memory segment used by Transients
417 class TransientsRr
: public Ipc::Mem::RegisteredRunner
420 /* RegisteredRunner API */
421 virtual void useConfig();
422 virtual ~TransientsRr();
425 virtual void create();
428 TransientsMap::Owner
*mapOwner
= nullptr;
431 RunnerRegistrationEntry(TransientsRr
);
434 TransientsRr::useConfig()
436 assert(Config
.memShared
.configured());
437 Ipc::Mem::RegisteredRunner::useConfig();
441 TransientsRr::create()
443 const int64_t entryLimit
= Transients::EntryLimit();
445 return; // no SMP configured or a misconfiguration
448 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
451 TransientsRr::~TransientsRr()