/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Storage Manager */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>

/// shared memory segment path to use for Transients map
static const auto &
MapLabel()
{
    static const auto label = new SBuf("transients_map");
    return *label;
}

Transients::Transients(): map(nullptr), locals(nullptr)
{
}

Transients::~Transients()
{
    delete map;
    delete locals;
}

void
Transients::init()
{
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map);
    map = new TransientsMap(MapLabel());
    map->cleaner = this;
    map->disableHitValidation(); // Transients lacks slices to validate

    locals = new Locals(entryLimit, nullptr);
}

void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#else
    (void)stats;
#endif
}

void
Transients::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}

void
Transients::maintain()
{
    // no lazy garbage collection needed
}

uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}

uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}

uint64_t
Transients::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
Transients::maxObjectSize() const
{
    // Squid currently does not limit the size of a transient object
    return std::numeric_limits<int64_t>::max();
}

void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}

bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return nullptr;

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return nullptr;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
        map->closeForReadingAndFreeIdle(index);
        return nullptr;
    }

    StoreEntry *e = new StoreEntry();
    e->createMemObject();
    e->mem_obj->xitTable.open(index, Store::ioReading);

    // keep read lock to receive updates from others
    return e;
}
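
/// finds the local StoreEntry (if any) already associated with the given
/// Transients map slot; returns nil when this worker has no such entry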
StoreEntry *
Transients::findCollapsed(const sfileno index)
{
    if (!map)
        return nullptr;

    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel());
        assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
        return oldE;
    }

    debugs(20, 3, "no entry at " << index << " in " << MapLabel());
    return nullptr;
}
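
/// attaches the entry to its Transients map slot (creating one if needed) and
/// remembers the entry in our local index so that slot-based lookups can find it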
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients());
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e);
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}

/// creates a new Transients entry
void
Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    assert(e);
    assert(e->mem_obj);
    assert(!e->hasTransients());

    Must(map); // configured to track transients

    if (direction == Store::ioWriting)
        return addWriterEntry(*e, key);

    assert(direction == Store::ioReading);
    addReaderEntry(*e, key);
}

/// addEntry() helper used for cache entry creators/writers
void
Transients::addWriterEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openForWriting(key, index);
    if (!anchor)
        throw TextException("writer collision", Here());

    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    e.mem_obj->xitTable.open(index, Store::ioWriting);

    anchor->setKey(key);
    // allow reading and receive remote DELETE events, but do not switch to
    // the reading lock because transientReaders() callers want true readers
    map->startAppending(index);
}

/// addEntry() helper used for cache readers
/// readers do not modify the cache, but they must create a Transients entry
void
Transients::addReaderEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openOrCreateForReading(key, index);
    if (!anchor)
        throw TextException("reader collision", Here());

    e.mem_obj->xitTable.open(index, Store::ioReading);
    // keep the entry locked (for reading) to receive remote DELETE events
}
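
/// whether the entry's Transients slot still has an active writer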
bool
Transients::hasWriter(const StoreEntry &e)
{
    if (!e.hasTransients())
        return false;
    return map->peekAtWriter(e.mem_obj->xitTable.index);
}

void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
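
/// copies the shared slot state (writer presence and pending deletion)
/// into the given entryStatus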
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.hasWriter = anchor.writing();
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
}
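
/// the writer has finished: switch our slot lock from writing to reading and
/// notify other workers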
void
Transients::completeWriting(const StoreEntry &e)
{
    debugs(20, 5, e);
    assert(e.hasTransients());
    assert(isWriter(e));
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    e.mem_obj->xitTable.io = Store::ioReading;
    CollapsedForwarding::Broadcast(e);
}
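
/// the number of readers currently holding the entry's Transients slot
/// (zero for entries without a slot)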
int
Transients::readers(const StoreEntry &e) const
{
    if (e.hasTransients()) {
        assert(map);
        return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
    }
    return 0;
}
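
/// marks the entry's Transients slot (if any) for deletion and, when the slot
/// is successfully freed, schedules a notification for other workers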
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}
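
/// evictCached() for entries this worker is not attached to: frees the map
/// slot corresponding to the given key (if the map exists) and notifies other workers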
void
Transients::evictIfFound(const cache_key *key)
{
    if (!map)
        return;

    const sfileno index = map->fileNoByKey(key);
    if (map->freeEntry(index))
        CollapsedForwarding::Broadcast(index, true);
}
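
/// severs the entry's association with its Transients slot: releases our map
/// lock (write or read), forgets the entry locally, and resets its xitTable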
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            // completeWriting() was not called, so there could be an active
            // Store writer out there, but we should not abortWriting() here
            // because another writer may have succeeded, making readers happy.
            // If none succeeded, the readers will notice the lack of writers.
            map->closeForWriting(xitTable.index);
            CollapsedForwarding::Broadcast(entry);
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr;
        xitTable.close();
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
Transients::EntryLimit()
{
    return (UsingSmp() && Store::Controller::SmpAware()) ?
           Config.shared_transient_entries_limit : 0;
}

bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map);
    return map->markedForDeletion(key);
}

bool
Transients::isReader(const StoreEntry &e) const
{
    return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading;
}

bool
Transients::isWriter(const StoreEntry &e) const
{
    return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting;
}

/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    void useConfig() override;
    ~TransientsRr() override;

protected:
    void create() override;

private:
    TransientsMap::Owner *mapOwner = nullptr;
};

DefineRunnerRegistrator(TransientsRr);

void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
TransientsRr::create()
{
    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner);
    mapOwner = TransientsMap::Init(MapLabel(), entryLimit);
}

TransientsRr::~TransientsRr()
{
    delete mapOwner;
}