/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
30 Transients::Transients(): map(NULL
), locals(NULL
)
34 Transients::~Transients()
44 const int64_t entryLimit
= EntryLimit();
45 assert(entryLimit
> 0);
48 map
= new TransientsMap(MapLabel
);
50 map
->disableHitValidation(); // Transients lacks slices to validate
52 locals
= new Locals(entryLimit
, 0);
56 Transients::getStats(StoreInfoStats
&stats
) const
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
73 Transients::stat(StoreEntry
&e
) const
75 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
77 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
78 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
79 currentSize() / 1024.0,
80 Math::doublePercent(currentSize(), maxSize()));
83 const int limit
= map
->entryLimit();
84 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
86 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
87 currentCount(), (100.0 * currentCount() / limit
));
93 Transients::maintain()
95 // no lazy garbage collection needed
99 Transients::minSize() const
101 return 0; // XXX: irrelevant, but Store parent forces us to implement this
105 Transients::maxSize() const
107 // Squid currently does not limit the total size of all transient objects
108 return std::numeric_limits
<uint64_t>::max();
112 Transients::currentSize() const
114 // TODO: we do not get enough information to calculate this
115 // StoreEntry should update associated stores when its size changes
120 Transients::currentCount() const
122 return map
? map
->entryCount() : 0;
126 Transients::maxObjectSize() const
128 // Squid currently does not limit the size of a transient object
129 return std::numeric_limits
<uint64_t>::max();
133 Transients::reference(StoreEntry
&)
135 // no replacement policy (but the cache(s) storing the entry may have one)
139 Transients::dereference(StoreEntry
&)
141 // no need to keep e in the global store_table for us; we have our own map
146 Transients::get(const cache_key
*key
)
152 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
156 // If we already have a local entry, the store_table should have found it.
157 // Since it did not, the local entry key must have changed from public to
158 // private. We still need to keep the private entry around for syncing as
159 // its clients depend on it, but we should not allow new clients to join.
160 if (StoreEntry
*oldE
= locals
->at(index
)) {
161 debugs(20, 3, "not joining private " << *oldE
);
162 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
163 map
->closeForReadingAndFreeIdle(index
);
167 // store hadWriter before checking ENTRY_REQUIRES_COLLAPSING to avoid racing
168 // the writer that clears that flag and then leaves
169 const auto hadWriter
= map
->peekAtWriter(index
);
170 if (!hadWriter
&& EBIT_TEST(anchor
->basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
171 debugs(20, 3, "not joining abandoned entry " << index
);
172 map
->closeForReadingAndFreeIdle(index
);
176 StoreEntry
*e
= new StoreEntry();
177 e
->createMemObject();
178 anchorEntry(*e
, index
, *anchor
);
180 // keep read lock to receive updates from others
185 Transients::findCollapsed(const sfileno index
)
190 if (StoreEntry
*oldE
= locals
->at(index
)) {
191 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
192 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
196 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
201 Transients::clearCollapsingRequirement(const StoreEntry
&e
)
204 assert(e
.hasTransients());
206 const auto idx
= e
.mem_obj
->xitTable
.index
;
207 auto &anchor
= map
->writeableEntry(idx
);
208 if (EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
209 EBIT_CLR(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
210 CollapsedForwarding::Broadcast(e
);
215 Transients::monitorIo(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
217 if (!e
->hasTransients()) {
218 addEntry(e
, key
, direction
);
219 assert(e
->hasTransients());
222 const auto index
= e
->mem_obj
->xitTable
.index
;
223 if (const auto old
= locals
->at(index
)) {
226 // We do not lock e because we do not want to prevent its destruction;
227 // e is tied to us via mem_obj so we will know when it is destructed.
228 locals
->at(index
) = e
;
232 /// creates a new Transients entry
234 Transients::addEntry(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
238 assert(!e
->hasTransients());
240 Must(map
); // configured to track transients
242 if (direction
== Store::ioWriting
)
243 return addWriterEntry(*e
, key
);
245 assert(direction
== Store::ioReading
);
246 addReaderEntry(*e
, key
);
249 /// addEntry() helper used for cache entry creators/writers
251 Transients::addWriterEntry(StoreEntry
&e
, const cache_key
*key
)
254 const auto anchor
= map
->openForWriting(key
, index
);
256 throw TextException("writer collision", Here());
258 // set ASAP in hope to unlock the slot if something throws
259 // and to provide index to such methods as hasWriter()
260 auto &xitTable
= e
.mem_obj
->xitTable
;
261 xitTable
.index
= index
;
262 xitTable
.io
= Store::ioWriting
;
265 // allow reading and receive remote DELETE events, but do not switch to
266 // the reading lock because transientReaders() callers want true readers
267 map
->startAppending(index
);
270 /// addEntry() helper used for cache readers
271 /// readers do not modify the cache, but they must create a Transients entry
273 Transients::addReaderEntry(StoreEntry
&e
, const cache_key
*key
)
276 const auto anchor
= map
->openOrCreateForReading(key
, index
, e
);
278 throw TextException("reader collision", Here());
280 anchorEntry(e
, index
, *anchor
);
281 // keep the entry locked (for reading) to receive remote DELETE events
284 /// fills (recently created) StoreEntry with information currently in Transients
286 Transients::anchorEntry(StoreEntry
&e
, const sfileno index
, const Ipc::StoreMapAnchor
&anchor
)
288 // set ASAP in hope to unlock the slot if something throws
289 // and to provide index to such methods as hasWriter()
290 auto &xitTable
= e
.mem_obj
->xitTable
;
291 xitTable
.index
= index
;
292 xitTable
.io
= Store::ioReading
;
294 const auto hadWriter
= hasWriter(e
); // before computing collapsingRequired
295 anchor
.exportInto(e
);
296 const bool collapsingRequired
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
297 assert(!collapsingRequired
|| hadWriter
);
298 e
.setCollapsingRequirement(collapsingRequired
);
302 Transients::hasWriter(const StoreEntry
&e
)
304 if (!e
.hasTransients())
306 return map
->peekAtWriter(e
.mem_obj
->xitTable
.index
);
310 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
312 // TODO: we should probably find the entry being deleted and abort it
316 Transients::status(const StoreEntry
&entry
, Transients::EntryStatus
&entryStatus
) const
319 assert(entry
.hasTransients());
320 const auto idx
= entry
.mem_obj
->xitTable
.index
;
321 const auto &anchor
= isWriter(entry
) ?
322 map
->writeableEntry(idx
) : map
->readableEntry(idx
);
323 entryStatus
.abortedByWriter
= anchor
.writerHalted
;
324 entryStatus
.waitingToBeFreed
= anchor
.waitingToBeFreed
;
325 entryStatus
.collapsed
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
329 Transients::completeWriting(const StoreEntry
&e
)
331 assert(e
.hasTransients());
333 map
->switchWritingToReading(e
.mem_obj
->xitTable
.index
);
334 e
.mem_obj
->xitTable
.io
= Store::ioReading
;
338 Transients::readers(const StoreEntry
&e
) const
340 if (e
.hasTransients()) {
342 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
348 Transients::evictCached(StoreEntry
&e
)
351 if (e
.hasTransients()) {
352 const auto index
= e
.mem_obj
->xitTable
.index
;
353 if (map
->freeEntry(index
)) {
354 // Delay syncCollapsed(index) which may end `e` wait for updates.
355 // Calling it directly/here creates complex reentrant call chains.
356 CollapsedForwarding::Broadcast(e
, true);
358 } // else nothing to do because e must be private
362 Transients::evictIfFound(const cache_key
*key
)
367 const sfileno index
= map
->fileNoByKey(key
);
368 if (map
->freeEntry(index
))
369 CollapsedForwarding::Broadcast(index
, true);
373 Transients::disconnect(StoreEntry
&entry
)
375 debugs(20, 5, entry
);
376 if (entry
.hasTransients()) {
377 auto &xitTable
= entry
.mem_obj
->xitTable
;
379 if (isWriter(entry
)) {
380 map
->abortWriting(xitTable
.index
);
382 assert(isReader(entry
));
383 map
->closeForReadingAndFreeIdle(xitTable
.index
);
385 locals
->at(xitTable
.index
) = nullptr;
387 xitTable
.io
= Store::ioDone
;
391 /// calculates maximum number of entries we need to store and map
393 Transients::EntryLimit()
395 return (UsingSmp() && Store::Controller::SmpAware()) ?
396 Config
.shared_transient_entries_limit
: 0;
400 Transients::markedForDeletion(const cache_key
*key
) const
403 return map
->markedForDeletion(key
);
407 Transients::isReader(const StoreEntry
&e
) const
409 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioReading
;
413 Transients::isWriter(const StoreEntry
&e
) const
415 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioWriting
;
418 /// initializes shared memory segment used by Transients
419 class TransientsRr
: public Ipc::Mem::RegisteredRunner
422 /* RegisteredRunner API */
423 virtual void useConfig();
424 virtual ~TransientsRr();
427 virtual void create();
430 TransientsMap::Owner
*mapOwner
= nullptr;
433 RunnerRegistrationEntry(TransientsRr
);
436 TransientsRr::useConfig()
438 assert(Config
.memShared
.configured());
439 Ipc::Mem::RegisteredRunner::useConfig();
443 TransientsRr::create()
445 const int64_t entryLimit
= Transients::EntryLimit();
447 return; // no SMP configured or a misconfiguration
450 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
453 TransientsRr::~TransientsRr()