/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
30 Transients::Transients(): map(nullptr), locals(nullptr)
34 Transients::~Transients()
44 const int64_t entryLimit
= EntryLimit();
45 assert(entryLimit
> 0);
48 map
= new TransientsMap(MapLabel
);
50 map
->disableHitValidation(); // Transients lacks slices to validate
52 locals
= new Locals(entryLimit
, nullptr);
56 Transients::getStats(StoreInfoStats
&stats
) const
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
73 Transients::stat(StoreEntry
&e
) const
75 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
77 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
78 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
79 currentSize() / 1024.0,
80 Math::doublePercent(currentSize(), maxSize()));
83 const int limit
= map
->entryLimit();
84 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
86 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
87 currentCount(), (100.0 * currentCount() / limit
));
93 Transients::maintain()
95 // no lazy garbage collection needed
99 Transients::minSize() const
101 return 0; // XXX: irrelevant, but Store parent forces us to implement this
105 Transients::maxSize() const
107 // Squid currently does not limit the total size of all transient objects
108 return std::numeric_limits
<uint64_t>::max();
112 Transients::currentSize() const
114 // TODO: we do not get enough information to calculate this
115 // StoreEntry should update associated stores when its size changes
120 Transients::currentCount() const
122 return map
? map
->entryCount() : 0;
126 Transients::maxObjectSize() const
128 // Squid currently does not limit the size of a transient object
129 return std::numeric_limits
<uint64_t>::max();
133 Transients::reference(StoreEntry
&)
135 // no replacement policy (but the cache(s) storing the entry may have one)
139 Transients::dereference(StoreEntry
&)
141 // no need to keep e in the global store_table for us; we have our own map
146 Transients::get(const cache_key
*key
)
152 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
156 // If we already have a local entry, the store_table should have found it.
157 // Since it did not, the local entry key must have changed from public to
158 // private. We still need to keep the private entry around for syncing as
159 // its clients depend on it, but we should not allow new clients to join.
160 if (StoreEntry
*oldE
= locals
->at(index
)) {
161 debugs(20, 3, "not joining private " << *oldE
);
162 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
163 map
->closeForReadingAndFreeIdle(index
);
167 StoreEntry
*e
= new StoreEntry();
168 e
->createMemObject();
169 e
->mem_obj
->xitTable
.open(index
, Store::ioReading
);
171 // keep read lock to receive updates from others
176 Transients::findCollapsed(const sfileno index
)
181 if (StoreEntry
*oldE
= locals
->at(index
)) {
182 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
183 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
187 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
192 Transients::monitorIo(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
194 if (!e
->hasTransients()) {
195 addEntry(e
, key
, direction
);
196 assert(e
->hasTransients());
199 const auto index
= e
->mem_obj
->xitTable
.index
;
200 if (const auto old
= locals
->at(index
)) {
203 // We do not lock e because we do not want to prevent its destruction;
204 // e is tied to us via mem_obj so we will know when it is destructed.
205 locals
->at(index
) = e
;
209 /// creates a new Transients entry
211 Transients::addEntry(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
215 assert(!e
->hasTransients());
217 Must(map
); // configured to track transients
219 if (direction
== Store::ioWriting
)
220 return addWriterEntry(*e
, key
);
222 assert(direction
== Store::ioReading
);
223 addReaderEntry(*e
, key
);
226 /// addEntry() helper used for cache entry creators/writers
228 Transients::addWriterEntry(StoreEntry
&e
, const cache_key
*key
)
231 const auto anchor
= map
->openForWriting(key
, index
);
233 throw TextException("writer collision", Here());
235 // set ASAP in hope to unlock the slot if something throws
236 // and to provide index to such methods as hasWriter()
237 e
.mem_obj
->xitTable
.open(index
, Store::ioWriting
);
240 // allow reading and receive remote DELETE events, but do not switch to
241 // the reading lock because transientReaders() callers want true readers
242 map
->startAppending(index
);
245 /// addEntry() helper used for cache readers
246 /// readers do not modify the cache, but they must create a Transients entry
248 Transients::addReaderEntry(StoreEntry
&e
, const cache_key
*key
)
251 const auto anchor
= map
->openOrCreateForReading(key
, index
);
253 throw TextException("reader collision", Here());
255 e
.mem_obj
->xitTable
.open(index
, Store::ioReading
);
256 // keep the entry locked (for reading) to receive remote DELETE events
260 Transients::hasWriter(const StoreEntry
&e
)
262 if (!e
.hasTransients())
264 return map
->peekAtWriter(e
.mem_obj
->xitTable
.index
);
268 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
270 // TODO: we should probably find the entry being deleted and abort it
274 Transients::status(const StoreEntry
&entry
, Transients::EntryStatus
&entryStatus
) const
277 assert(entry
.hasTransients());
278 const auto idx
= entry
.mem_obj
->xitTable
.index
;
279 const auto &anchor
= isWriter(entry
) ?
280 map
->writeableEntry(idx
) : map
->readableEntry(idx
);
281 entryStatus
.hasWriter
= anchor
.writing();
282 entryStatus
.waitingToBeFreed
= anchor
.waitingToBeFreed
;
286 Transients::completeWriting(const StoreEntry
&e
)
289 assert(e
.hasTransients());
291 map
->switchWritingToReading(e
.mem_obj
->xitTable
.index
);
292 e
.mem_obj
->xitTable
.io
= Store::ioReading
;
293 CollapsedForwarding::Broadcast(e
);
297 Transients::readers(const StoreEntry
&e
) const
299 if (e
.hasTransients()) {
301 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
307 Transients::evictCached(StoreEntry
&e
)
310 if (e
.hasTransients()) {
311 const auto index
= e
.mem_obj
->xitTable
.index
;
312 if (map
->freeEntry(index
)) {
313 // Delay syncCollapsed(index) which may end `e` wait for updates.
314 // Calling it directly/here creates complex reentrant call chains.
315 CollapsedForwarding::Broadcast(e
, true);
317 } // else nothing to do because e must be private
321 Transients::evictIfFound(const cache_key
*key
)
326 const sfileno index
= map
->fileNoByKey(key
);
327 if (map
->freeEntry(index
))
328 CollapsedForwarding::Broadcast(index
, true);
332 Transients::disconnect(StoreEntry
&entry
)
334 debugs(20, 5, entry
);
335 if (entry
.hasTransients()) {
336 auto &xitTable
= entry
.mem_obj
->xitTable
;
338 if (isWriter(entry
)) {
339 // completeWriting() was not called, so there could be an active
340 // Store writer out there, but we should not abortWriting() here
341 // because another writer may have succeeded, making readers happy.
342 // If none succeeded, the readers will notice the lack of writers.
343 map
->closeForWriting(xitTable
.index
);
344 CollapsedForwarding::Broadcast(entry
);
346 assert(isReader(entry
));
347 map
->closeForReadingAndFreeIdle(xitTable
.index
);
349 locals
->at(xitTable
.index
) = nullptr;
354 /// calculates maximum number of entries we need to store and map
356 Transients::EntryLimit()
358 return (UsingSmp() && Store::Controller::SmpAware()) ?
359 Config
.shared_transient_entries_limit
: 0;
363 Transients::markedForDeletion(const cache_key
*key
) const
366 return map
->markedForDeletion(key
);
370 Transients::isReader(const StoreEntry
&e
) const
372 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioReading
;
376 Transients::isWriter(const StoreEntry
&e
) const
378 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioWriting
;
381 /// initializes shared memory segment used by Transients
382 class TransientsRr
: public Ipc::Mem::RegisteredRunner
385 /* RegisteredRunner API */
386 void useConfig() override
;
387 ~TransientsRr() override
;
390 void create() override
;
393 TransientsMap::Owner
*mapOwner
= nullptr;
396 DefineRunnerRegistrator(TransientsRr
);
399 TransientsRr::useConfig()
401 assert(Config
.memShared
.configured());
402 Ipc::Mem::RegisteredRunner::useConfig();
406 TransientsRr::create()
408 const int64_t entryLimit
= Transients::EntryLimit();
410 return; // no SMP configured or a misconfiguration
413 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
416 TransientsRr::~TransientsRr()