/*
 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "Transients.h"

#include <limits>
27 /// shared memory segment path to use for Transients map
28 static const SBuf
MapLabel("transients_map");
30 Transients::Transients(): map(NULL
), locals(NULL
)
34 Transients::~Transients()
44 const int64_t entryLimit
= EntryLimit();
45 assert(entryLimit
> 0);
48 map
= new TransientsMap(MapLabel
);
50 map
->disableHitValidation(); // Transients lacks slices to validate
52 locals
= new Locals(entryLimit
, 0);
56 Transients::getStats(StoreInfoStats
&stats
) const
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
71 Transients::stat(StoreEntry
&e
) const
73 storeAppendPrintf(&e
, "\n\nTransient Objects\n");
75 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
76 storeAppendPrintf(&e
, "Current Size: %.2f KB %.2f%%\n",
77 currentSize() / 1024.0,
78 Math::doublePercent(currentSize(), maxSize()));
81 const int limit
= map
->entryLimit();
82 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
84 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
85 currentCount(), (100.0 * currentCount() / limit
));
91 Transients::maintain()
93 // no lazy garbage collection needed
97 Transients::minSize() const
99 return 0; // XXX: irrelevant, but Store parent forces us to implement this
103 Transients::maxSize() const
105 // Squid currently does not limit the total size of all transient objects
106 return std::numeric_limits
<uint64_t>::max();
110 Transients::currentSize() const
112 // TODO: we do not get enough information to calculate this
113 // StoreEntry should update associated stores when its size changes
118 Transients::currentCount() const
120 return map
? map
->entryCount() : 0;
124 Transients::maxObjectSize() const
126 // Squid currently does not limit the size of a transient object
127 return std::numeric_limits
<uint64_t>::max();
131 Transients::reference(StoreEntry
&)
133 // no replacement policy (but the cache(s) storing the entry may have one)
137 Transients::dereference(StoreEntry
&)
139 // no need to keep e in the global store_table for us; we have our own map
144 Transients::get(const cache_key
*key
)
150 const Ipc::StoreMapAnchor
*anchor
= map
->openForReading(key
, index
);
154 // If we already have a local entry, the store_table should have found it.
155 // Since it did not, the local entry key must have changed from public to
156 // private. We still need to keep the private entry around for syncing as
157 // its clients depend on it, but we should not allow new clients to join.
158 if (StoreEntry
*oldE
= locals
->at(index
)) {
159 debugs(20, 3, "not joining private " << *oldE
);
160 assert(EBIT_TEST(oldE
->flags
, KEY_PRIVATE
));
161 map
->closeForReadingAndFreeIdle(index
);
165 StoreEntry
*e
= new StoreEntry();
166 e
->createMemObject();
167 e
->mem_obj
->xitTable
.index
= index
;
168 e
->mem_obj
->xitTable
.io
= Store::ioReading
;
169 anchor
->exportInto(*e
);
170 const bool collapsingRequired
= EBIT_TEST(anchor
->basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
171 e
->setCollapsingRequirement(collapsingRequired
);
172 // keep read lock to receive updates from others
177 Transients::findCollapsed(const sfileno index
)
182 if (StoreEntry
*oldE
= locals
->at(index
)) {
183 debugs(20, 5, "found " << *oldE
<< " at " << index
<< " in " << MapLabel
);
184 assert(oldE
->mem_obj
&& oldE
->mem_obj
->xitTable
.index
== index
);
188 debugs(20, 3, "no entry at " << index
<< " in " << MapLabel
);
193 Transients::clearCollapsingRequirement(const StoreEntry
&e
)
196 assert(e
.hasTransients());
198 const auto idx
= e
.mem_obj
->xitTable
.index
;
199 auto &anchor
= map
->writeableEntry(idx
);
200 if (EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
)) {
201 EBIT_CLR(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
202 CollapsedForwarding::Broadcast(e
);
207 Transients::monitorIo(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
209 if (!e
->hasTransients()) {
210 addEntry(e
, key
, direction
);
211 assert(e
->hasTransients());
214 const auto index
= e
->mem_obj
->xitTable
.index
;
215 if (const auto old
= locals
->at(index
)) {
218 // We do not lock e because we do not want to prevent its destruction;
219 // e is tied to us via mem_obj so we will know when it is destructed.
220 locals
->at(index
) = e
;
224 /// creates a new Transients entry
226 Transients::addEntry(StoreEntry
*e
, const cache_key
*key
, const Store::IoStatus direction
)
230 assert(!e
->hasTransients());
232 Must(map
); // configured to track transients
235 Ipc::StoreMapAnchor
*slot
= map
->openForWriting(key
, index
);
236 Must(slot
); // no writer collisions
238 // set ASAP in hope to unlock the slot if something throws
239 e
->mem_obj
->xitTable
.index
= index
;
240 e
->mem_obj
->xitTable
.io
= Store::ioWriting
;
243 if (direction
== Store::ioWriting
) {
244 // allow reading and receive remote DELETE events, but do not switch to
245 // the reading lock because transientReaders() callers want true readers
246 map
->startAppending(index
);
248 assert(direction
== Store::ioReading
);
249 // keep the entry locked (for reading) to receive remote DELETE events
250 map
->switchWritingToReading(index
);
251 e
->mem_obj
->xitTable
.io
= Store::ioReading
;
256 Transients::hasWriter(const StoreEntry
&e
)
258 if (!e
.hasTransients())
260 return map
->peekAtWriter(e
.mem_obj
->xitTable
.index
);
264 Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId
)
266 // TODO: we should probably find the entry being deleted and abort it
270 Transients::status(const StoreEntry
&entry
, Transients::EntryStatus
&entryStatus
) const
273 assert(entry
.hasTransients());
274 const auto idx
= entry
.mem_obj
->xitTable
.index
;
275 const auto &anchor
= isWriter(entry
) ?
276 map
->writeableEntry(idx
) : map
->readableEntry(idx
);
277 entryStatus
.abortedByWriter
= anchor
.writerHalted
;
278 entryStatus
.waitingToBeFreed
= anchor
.waitingToBeFreed
;
279 entryStatus
.collapsed
= EBIT_TEST(anchor
.basics
.flags
, ENTRY_REQUIRES_COLLAPSING
);
283 Transients::completeWriting(const StoreEntry
&e
)
285 assert(e
.hasTransients());
287 map
->switchWritingToReading(e
.mem_obj
->xitTable
.index
);
288 e
.mem_obj
->xitTable
.io
= Store::ioReading
;
292 Transients::readers(const StoreEntry
&e
) const
294 if (e
.hasTransients()) {
296 return map
->peekAtEntry(e
.mem_obj
->xitTable
.index
).lock
.readers
;
302 Transients::evictCached(StoreEntry
&e
)
305 if (e
.hasTransients()) {
306 const auto index
= e
.mem_obj
->xitTable
.index
;
307 if (map
->freeEntry(index
)) {
308 // Delay syncCollapsed(index) which may end `e` wait for updates.
309 // Calling it directly/here creates complex reentrant call chains.
310 CollapsedForwarding::Broadcast(e
, true);
312 } // else nothing to do because e must be private
316 Transients::evictIfFound(const cache_key
*key
)
321 const sfileno index
= map
->fileNoByKey(key
);
322 if (map
->freeEntry(index
))
323 CollapsedForwarding::Broadcast(index
, true);
327 Transients::disconnect(StoreEntry
&entry
)
329 debugs(20, 5, entry
);
330 if (entry
.hasTransients()) {
331 auto &xitTable
= entry
.mem_obj
->xitTable
;
333 if (isWriter(entry
)) {
334 map
->abortWriting(xitTable
.index
);
336 assert(isReader(entry
));
337 map
->closeForReadingAndFreeIdle(xitTable
.index
);
339 locals
->at(xitTable
.index
) = nullptr;
341 xitTable
.io
= Store::ioDone
;
345 /// calculates maximum number of entries we need to store and map
347 Transients::EntryLimit()
349 return (UsingSmp() && Store::Controller::SmpAware()) ?
350 Config
.shared_transient_entries_limit
: 0;
354 Transients::markedForDeletion(const cache_key
*key
) const
357 return map
->markedForDeletion(key
);
361 Transients::isReader(const StoreEntry
&e
) const
363 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioReading
;
367 Transients::isWriter(const StoreEntry
&e
) const
369 return e
.mem_obj
&& e
.mem_obj
->xitTable
.io
== Store::ioWriting
;
372 /// initializes shared memory segment used by Transients
373 class TransientsRr
: public Ipc::Mem::RegisteredRunner
376 /* RegisteredRunner API */
377 virtual void useConfig();
378 virtual ~TransientsRr();
381 virtual void create();
384 TransientsMap::Owner
*mapOwner
= nullptr;
387 RunnerRegistrationEntry(TransientsRr
);
390 TransientsRr::useConfig()
392 assert(Config
.memShared
.configured());
393 Ipc::Mem::RegisteredRunner::useConfig();
397 TransientsRr::create()
399 const int64_t entryLimit
= Transients::EntryLimit();
401 return; // no SMP configured or a misconfiguration
404 mapOwner
= TransientsMap::Init(MapLabel
, entryLimit
);
407 TransientsRr::~TransientsRr()