/*
 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
30 Transients::Transients(): map(NULL
), locals(NULL
)
34 Transients::~Transients()
44 const int64_t entryLimit
= EntryLimit();
45 assert(entryLimit
> 0);
48 map
= new TransientsMap(MapLabel
);
50 map
->disableHitValidation(); // Transients lacks slices to validate
52 locals
= new Locals(entryLimit
, 0);
56 Transients::getStats(StoreInfoStats
&stats
) const
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
71 Transients::stat(StoreEntry
&e
) const
73 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
75 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
76 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
77 currentSize() / 1024.0,
78 Math::doublePercent(currentSize(), maxSize()));
81 const int limit
= map
->entryLimit();
82 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
84 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
85 currentCount(), (100.0 * currentCount() / limit
));
91 Transients::maintain()
93 // no lazy garbage collection needed
97 Transients::minSize() const
99 return 0; // XXX: irrelevant, but Store parent forces us to implement this
103 Transients::maxSize() const
105 // Squid currently does not limit the total size of all transient objects
106 return std::numeric_limits
<uint64_t>::max();
110 Transients::currentSize() const
112 // TODO: we do not get enough information to calculate this
113 // StoreEntry should update associated stores when its size changes
118 Transients::currentCount() const
120 return map
? map
->entryCount() : 0;
124 Transients::maxObjectSize() const
126 // Squid currently does not limit the size of a transient object
127 return std::numeric_limits
<uint64_t>::max();
131 Transients::reference(StoreEntry
&)
133 // no replacement policy (but the cache(s) storing the entry may have one)
137 Transients::dereference(StoreEntry
&)
139 // no need to keep e in the global store_table for us; we have our own map
144 Transients::get(const cache_key
*key
)
150 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
154 // If we already have a local entry, the store_table should have found it.
155 // Since it did not, the local entry key must have changed from public to
156 // private. We still need to keep the private entry around for syncing as
157 // its clients depend on it, but we should not allow new clients to join.
158 if (StoreEntry
*oldE
= locals
->at(index
)) {
159 debugs(20, 3, "not joining private " << *oldE
);
160 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
161 map
->closeForReadingAndFreeIdle(index
);
165 // store hadWriter before checking ENTRY_REQUIRES_COLLAPSING to avoid racing
166 // the writer that clears that flag and then leaves
167 const auto hadWriter
= map
->peekAtWriter(index
);
168 if (!hadWriter
&& EBIT_TEST(anchor
->basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
169 debugs(20, 3, "not joining abandoned entry " << index
);
170 map
->closeForReadingAndFreeIdle(index
);
174 StoreEntry
*e
= new StoreEntry();
175 e
->createMemObject();
176 e
->mem_obj
->xitTable
.index
= index
;
177 e
->mem_obj
->xitTable
.io
= Store::ioReading
;
178 anchor
->exportInto(*e
);
180 if (EBIT_TEST(anchor
->basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
182 e
->setCollapsingRequirement(true);
185 // keep read lock to receive updates from others
190 Transients::findCollapsed(const sfileno index
)
195 if (StoreEntry
*oldE
= locals
->at(index
)) {
196 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
197 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
201 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
206 Transients::clearCollapsingRequirement(const StoreEntry
&e
)
209 assert(e
.hasTransients());
211 const auto idx
= e
.mem_obj
->xitTable
.index
;
212 auto &anchor
= map
->writeableEntry(idx
);
213 if (EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
214 EBIT_CLR(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
215 CollapsedForwarding::Broadcast(e
);
220 Transients::monitorIo(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
222 if (!e
->hasTransients()) {
223 addEntry(e
, key
, direction
);
224 assert(e
->hasTransients());
227 const auto index
= e
->mem_obj
->xitTable
.index
;
228 if (const auto old
= locals
->at(index
)) {
231 // We do not lock e because we do not want to prevent its destruction;
232 // e is tied to us via mem_obj so we will know when it is destructed.
233 locals
->at(index
) = e
;
237 /// creates a new Transients entry
239 Transients::addEntry(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
243 assert(!e
->hasTransients());
245 Must(map
); // configured to track transients
248 Ipc::StoreMapAnchor
*slot
= map
->openForWriting(key
, index
);
249 Must(slot
); // no writer collisions
251 // set ASAP in hope to unlock the slot if something throws
252 e
->mem_obj
->xitTable
.index
= index
;
253 e
->mem_obj
->xitTable
.io
= Store::ioWriting
;
256 if (direction
== Store::ioWriting
) {
257 // allow reading and receive remote DELETE events, but do not switch to
258 // the reading lock because transientReaders() callers want true readers
259 map
->startAppending(index
);
261 assert(direction
== Store::ioReading
);
262 // keep the entry locked (for reading) to receive remote DELETE events
263 map
->switchWritingToReading(index
);
264 e
->mem_obj
->xitTable
.io
= Store::ioReading
;
269 Transients::hasWriter(const StoreEntry
&e
)
271 if (!e
.hasTransients())
273 return map
->peekAtWriter(e
.mem_obj
->xitTable
.index
);
277 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
279 // TODO: we should probably find the entry being deleted and abort it
283 Transients::status(const StoreEntry
&entry
, Transients::EntryStatus
&entryStatus
) const
286 assert(entry
.hasTransients());
287 const auto idx
= entry
.mem_obj
->xitTable
.index
;
288 const auto &anchor
= isWriter(entry
) ?
289 map
->writeableEntry(idx
) : map
->readableEntry(idx
);
290 entryStatus
.abortedByWriter
= anchor
.writerHalted
;
291 entryStatus
.waitingToBeFreed
= anchor
.waitingToBeFreed
;
292 entryStatus
.collapsed
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
296 Transients::completeWriting(const StoreEntry
&e
)
298 assert(e
.hasTransients());
300 map
->switchWritingToReading(e
.mem_obj
->xitTable
.index
);
301 e
.mem_obj
->xitTable
.io
= Store::ioReading
;
305 Transients::readers(const StoreEntry
&e
) const
307 if (e
.hasTransients()) {
309 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
315 Transients::evictCached(StoreEntry
&e
)
318 if (e
.hasTransients()) {
319 const auto index
= e
.mem_obj
->xitTable
.index
;
320 if (map
->freeEntry(index
)) {
321 // Delay syncCollapsed(index) which may end `e` wait for updates.
322 // Calling it directly/here creates complex reentrant call chains.
323 CollapsedForwarding::Broadcast(e
, true);
325 } // else nothing to do because e must be private
329 Transients::evictIfFound(const cache_key
*key
)
334 const sfileno index
= map
->fileNoByKey(key
);
335 if (map
->freeEntry(index
))
336 CollapsedForwarding::Broadcast(index
, true);
340 Transients::disconnect(StoreEntry
&entry
)
342 debugs(20, 5, entry
);
343 if (entry
.hasTransients()) {
344 auto &xitTable
= entry
.mem_obj
->xitTable
;
346 if (isWriter(entry
)) {
347 map
->abortWriting(xitTable
.index
);
349 assert(isReader(entry
));
350 map
->closeForReadingAndFreeIdle(xitTable
.index
);
352 locals
->at(xitTable
.index
) = nullptr;
354 xitTable
.io
= Store::ioDone
;
358 /// calculates maximum number of entries we need to store and map
360 Transients::EntryLimit()
362 return (UsingSmp() && Store::Controller::SmpAware()) ?
363 Config
.shared_transient_entries_limit
: 0;
367 Transients::markedForDeletion(const cache_key
*key
) const
370 return map
->markedForDeletion(key
);
374 Transients::isReader(const StoreEntry
&e
) const
376 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioReading
;
380 Transients::isWriter(const StoreEntry
&e
) const
382 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioWriting
;
385 /// initializes shared memory segment used by Transients
386 class TransientsRr
: public Ipc::Mem::RegisteredRunner
389 /* RegisteredRunner API */
390 virtual void useConfig();
391 virtual ~TransientsRr();
394 virtual void create();
397 TransientsMap::Owner
*mapOwner
= nullptr;
400 RunnerRegistrationEntry(TransientsRr
);
403 TransientsRr::useConfig()
405 assert(Config
.memShared
.configured());
406 Ipc::Mem::RegisteredRunner::useConfig();
410 TransientsRr::create()
412 const int64_t entryLimit
= Transients::EntryLimit();
414 return; // no SMP configured or a misconfiguration
417 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
420 TransientsRr::~TransientsRr()