]> git.ipfire.org Git - thirdparty/squid.git/blob - src/Transients.cc
Source Format Enforcement (#963)
[thirdparty/squid.git] / src / Transients.cc
1 /*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "base/RunnersRegistry.h"
13 #include "CollapsedForwarding.h"
14 #include "HttpReply.h"
15 #include "ipc/mem/Page.h"
16 #include "ipc/mem/Pages.h"
17 #include "MemObject.h"
18 #include "mime_header.h"
19 #include "SquidConfig.h"
20 #include "SquidMath.h"
21 #include "StoreStats.h"
22 #include "tools.h"
23 #include "Transients.h"
24
25 #include <limits>
26
/// shared memory segment path to use for the Transients map;
/// also printed in debug messages (see findCollapsed())
static const SBuf MapLabel("transients_map");
29
30 Transients::Transients(): map(NULL), locals(NULL)
31 {
32 }
33
Transients::~Transients()
{
    // locals stores non-owning StoreEntry pointers (see monitorIo()),
    // so destroying the containers does not destroy any entries
    delete map;
    delete locals;
}
39
void
Transients::init()
{
    // the caller is responsible for checking Enabled() first
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map); // init() must be called at most once
    map = new TransientsMap(MapLabel);
    map->cleaner = this;
    map->disableHitValidation(); // Transients lacks slices to validate

    // one local (per-worker) StoreEntry slot per possible map entry
    locals = new Locals(entryLimit, 0);
}
54
void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#else
    // stats reporting compiled out; silence the unused-parameter warning
    (void)stats;
#endif
}
71
72 void
73 Transients::stat(StoreEntry &e) const
74 {
75 storeAppendPrintf(&e, "\n\nTransient Objects\n");
76
77 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
78 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
79 currentSize() / 1024.0,
80 Math::doublePercent(currentSize(), maxSize()));
81
82 if (map) {
83 const int limit = map->entryLimit();
84 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
85 if (limit > 0) {
86 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
87 currentCount(), (100.0 * currentCount() / limit));
88 }
89 }
90 }
91
void
Transients::maintain()
{
    // no lazy garbage collection needed:
    // entries are removed explicitly via evictCached()/evictIfFound()/disconnect()
}
97
uint64_t
Transients::minSize() const
{
    // Store API requires an answer even though transients have no size floor
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
103
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects,
    // so report the largest representable capacity
    return std::numeric_limits<uint64_t>::max();
}
110
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0; // unknown; reported as zero rather than guessed
}
118
119 uint64_t
120 Transients::currentCount() const
121 {
122 return map ? map->entryCount() : 0;
123 }
124
125 int64_t
126 Transients::maxObjectSize() const
127 {
128 // Squid currently does not limit the size of a transient object
129 return std::numeric_limits<uint64_t>::max();
130 }
131
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one),
    // so entry usage does not need to be recorded here
}
137
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    // (returning false tells the caller we do not require keeping the entry)
    return false;
}
144
145 StoreEntry *
146 Transients::get(const cache_key *key)
147 {
148 if (!map)
149 return NULL;
150
151 sfileno index;
152 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
153 if (!anchor)
154 return NULL;
155
156 // If we already have a local entry, the store_table should have found it.
157 // Since it did not, the local entry key must have changed from public to
158 // private. We still need to keep the private entry around for syncing as
159 // its clients depend on it, but we should not allow new clients to join.
160 if (StoreEntry *oldE = locals->at(index)) {
161 debugs(20, 3, "not joining private " << *oldE);
162 assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
163 map->closeForReadingAndFreeIdle(index);
164 return nullptr;
165 }
166
167 // store hadWriter before checking ENTRY_REQUIRES_COLLAPSING to avoid racing
168 // the writer that clears that flag and then leaves
169 const auto hadWriter = map->peekAtWriter(index);
170 if (!hadWriter && EBIT_TEST(anchor->basics.flags, ENTRY_REQUIRES_COLLAPSING)) {
171 debugs(20, 3, "not joining abandoned entry " << index);
172 map->closeForReadingAndFreeIdle(index);
173 return nullptr;
174 }
175
176 StoreEntry *e = new StoreEntry();
177 e->createMemObject();
178 anchorEntry(*e, index, *anchor);
179
180 // keep read lock to receive updates from others
181 return e;
182 }
183
184 StoreEntry *
185 Transients::findCollapsed(const sfileno index)
186 {
187 if (!map)
188 return NULL;
189
190 if (StoreEntry *oldE = locals->at(index)) {
191 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
192 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
193 return oldE;
194 }
195
196 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
197 return NULL;
198 }
199
void
Transients::clearCollapsingRequirement(const StoreEntry &e)
{
    assert(map);
    assert(e.hasTransients());
    assert(isWriter(e)); // only the entry writer may clear the flag
    const auto idx = e.mem_obj->xitTable.index;
    auto &anchor = map->writeableEntry(idx);
    if (EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING)) {
        EBIT_CLR(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
        CollapsedForwarding::Broadcast(e); // notify readers of the change
    }
}
213
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    // create the shared map entry on first use
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients());
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e); // at most one local StoreEntry per map slot
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}
231
232 /// creates a new Transients entry
233 void
234 Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
235 {
236 assert(e);
237 assert(e->mem_obj);
238 assert(!e->hasTransients());
239
240 Must(map); // configured to track transients
241
242 if (direction == Store::ioWriting)
243 return addWriterEntry(*e, key);
244
245 assert(direction == Store::ioReading);
246 addReaderEntry(*e, key);
247 }
248
/// addEntry() helper used for cache entry creators/writers
/// \throws TextException when another worker is already writing this entry
void
Transients::addWriterEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openForWriting(key, index);
    if (!anchor)
        throw TextException("writer collision", Here());

    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    auto &xitTable = e.mem_obj->xitTable;
    xitTable.index = index;
    xitTable.io = Store::ioWriting;

    anchor->set(e, key);
    // allow reading and receive remote DELETE events, but do not switch to
    // the reading lock because transientReaders() callers want true readers
    map->startAppending(index);
}
269
/// addEntry() helper used for cache readers
/// readers do not modify the cache, but they must create a Transients entry
/// \throws TextException when the entry cannot be opened or created for reading
void
Transients::addReaderEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openOrCreateForReading(key, index, e);
    if (!anchor)
        throw TextException("reader collision", Here());

    anchorEntry(e, index, *anchor);
    // keep the entry locked (for reading) to receive remote DELETE events
}
283
/// fills (recently created) StoreEntry with information currently in Transients
void
Transients::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    auto &xitTable = e.mem_obj->xitTable;
    xitTable.index = index;
    xitTable.io = Store::ioReading;

    const auto hadWriter = hasWriter(e); // before computing collapsingRequired
    anchor.exportInto(e);
    const bool collapsingRequired = EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
    // a collapsing requirement without a live writer would mean the entry was
    // abandoned; get() refuses to join such entries before reaching us
    assert(!collapsingRequired || hadWriter);
    e.setCollapsingRequirement(collapsingRequired);
}
300
301 bool
302 Transients::hasWriter(const StoreEntry &e)
303 {
304 if (!e.hasTransients())
305 return false;
306 return map->peekAtWriter(e.mem_obj->xitTable.index);
307 }
308
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // StoreMapCleaner callback; we currently ignore freed slices
    // TODO: we should probably find the entry being deleted and abort it
}
314
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    // use the anchor accessor matching our current lock role
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.abortedByWriter = anchor.writerHalted;
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
    entryStatus.collapsed = EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
}
327
void
Transients::completeWriting(const StoreEntry &e)
{
    assert(e.hasTransients());
    assert(isWriter(e)); // only the writer can complete writing
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    // keep the local I/O state in sync with the shared map lock
    e.mem_obj->xitTable.io = Store::ioReading;
}
336
337 int
338 Transients::readers(const StoreEntry &e) const
339 {
340 if (e.hasTransients()) {
341 assert(map);
342 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
343 }
344 return 0;
345 }
346
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        // freeEntry() is true when we (rather than another worker) freed it
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}
360
void
Transients::evictIfFound(const cache_key *key)
{
    if (!map)
        return;

    const sfileno index = map->fileNoByKey(key);
    // only broadcast when this call actually freed the entry
    if (map->freeEntry(index))
        CollapsedForwarding::Broadcast(index, true);
}
371
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            map->abortWriting(xitTable.index);
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr; // forget the local registration
        xitTable.index = -1; // mark the entry as having no Transients slot
        xitTable.io = Store::ioDone;
    }
}
390
391 /// calculates maximum number of entries we need to store and map
392 int64_t
393 Transients::EntryLimit()
394 {
395 return (UsingSmp() && Store::Controller::SmpAware()) ?
396 Config.shared_transient_entries_limit : 0;
397 }
398
bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map); // callers must not ask before init() created the map
    return map->markedForDeletion(key);
}
405
406 bool
407 Transients::isReader(const StoreEntry &e) const
408 {
409 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading;
410 }
411
412 bool
413 Transients::isWriter(const StoreEntry &e) const
414 {
415 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting;
416 }
417
418 /// initializes shared memory segment used by Transients
419 class TransientsRr: public Ipc::Mem::RegisteredRunner
420 {
421 public:
422 /* RegisteredRunner API */
423 virtual void useConfig();
424 virtual ~TransientsRr();
425
426 protected:
427 virtual void create();
428
429 private:
430 TransientsMap::Owner *mapOwner = nullptr;
431 };
432
433 RunnerRegistrationEntry(TransientsRr);
434
void
TransientsRr::useConfig()
{
    // shared memory configuration must be finalized before we act on it
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
441
442 void
443 TransientsRr::create()
444 {
445 const int64_t entryLimit = Transients::EntryLimit();
446 if (entryLimit <= 0)
447 return; // no SMP configured or a misconfiguration
448
449 Must(!mapOwner);
450 mapOwner = TransientsMap::Init(MapLabel, entryLimit);
451 }
452
TransientsRr::~TransientsRr()
{
    delete mapOwner; // releases the map owner created in create() (if any)
}
457