/*
 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Storage Manager */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
#include "Transients.h"

#include <limits>

/// shared memory segment path to use for Transients map
static const SBuf MapLabel("transients_map");

Transients::Transients(): map(NULL), locals(NULL)
{
}

Transients::~Transients()
{
    delete map;
    delete locals;
}

void
Transients::init()
{
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map);
    map = new TransientsMap(MapLabel);
    map->cleaner = this;

    locals = new Locals(entryLimit, 0);
}

void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#endif
}

void
Transients::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}

void
Transients::maintain()
{
    // no lazy garbage collection needed
}

uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}

uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}

uint64_t
Transients::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
Transients::maxObjectSize() const
{
    // Squid currently does not limit the size of a transient object;
    // return the largest value representable by the int64_t return type
    return std::numeric_limits<int64_t>::max();
}

void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}

bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
        map->closeForReadingAndFreeIdle(index);
        return nullptr;
    }

    StoreEntry *e = new StoreEntry();
    e->createMemObject();
    e->mem_obj->xitTable.index = index;
    e->mem_obj->xitTable.io = Store::ioReading;
    anchor->exportInto(*e);
    const bool collapsingRequired = EBIT_TEST(anchor->basics.flags, ENTRY_REQUIRES_COLLAPSING);
    e->setCollapsingRequirement(collapsingRequired);
    // keep read lock to receive updates from others
    return e;
}

StoreEntry *
Transients::findCollapsed(const sfileno index)
{
    if (!map)
        return NULL;

    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
        assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
        return oldE;
    }

    debugs(20, 3, "no entry at " << index << " in " << MapLabel);
    return NULL;
}

void
Transients::clearCollapsingRequirement(const StoreEntry &e)
{
    assert(map);
    assert(e.hasTransients());
    assert(isWriter(e));
    const auto idx = e.mem_obj->xitTable.index;
    auto &anchor = map->writeableEntry(idx);
    if (EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING)) {
        EBIT_CLR(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
        CollapsedForwarding::Broadcast(e);
    }
}

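/// starts tracking the given entry for the given I/O direction, creating its
/// Transients map slot if needed and remembering the entry in locals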
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients());
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e);
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}

/// creates a new Transients entry
void
Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    assert(e);
    assert(e->mem_obj);
    assert(!e->hasTransients());

    Must(map); // configured to track transients

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(key, index);
    Must(slot); // no writer collisions

    // set ASAP in hope to unlock the slot if something throws
    e->mem_obj->xitTable.index = index;
    e->mem_obj->xitTable.io = Store::ioWriting;

    slot->set(*e, key);
    if (direction == Store::ioWriting) {
        // allow reading and receive remote DELETE events, but do not switch to
        // the reading lock because transientReaders() callers want true readers
        map->startAppending(index);
    } else {
        assert(direction == Store::ioReading);
        // keep the entry locked (for reading) to receive remote DELETE events
        map->switchWritingToReading(index);
        e->mem_obj->xitTable.io = Store::ioReading;
    }
}

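/// whether some worker is still writing the corresponding shared map entry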
bool
Transients::hasWriter(const StoreEntry &e)
{
    if (!e.hasTransients())
        return false;
    return map->peekAtWriter(e.mem_obj->xitTable.index);
}

void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}

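/// copies the shared entry state (abort, free, and collapsing flags) into entryStatus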
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.abortedByWriter = anchor.writerHalted;
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
    entryStatus.collapsed = EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
}

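/// the writer has finished: switch our lock from writing to reading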
void
Transients::completeWriting(const StoreEntry &e)
{
    assert(e.hasTransients());
    assert(isWriter(e));
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    e.mem_obj->xitTable.io = Store::ioReading;
}

int
Transients::readers(const StoreEntry &e) const
{
    if (e.hasTransients()) {
        assert(map);
        return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
    }
    return 0;
}

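/// frees the shared entry (if any) and lets other workers know about the removal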
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}

void
Transients::evictIfFound(const cache_key *key)
{
    if (!map)
        return;

    const sfileno index = map->fileNoByKey(key);
    if (map->freeEntry(index))
        CollapsedForwarding::Broadcast(index, true);
}

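/// stops any current I/O for the entry: releases our map lock and forgets the local pointer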
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            map->abortWriting(xitTable.index);
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr;
        xitTable.index = -1;
        xitTable.io = Store::ioDone;
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
Transients::EntryLimit()
{
    return (UsingSmp() && Store::Controller::SmpAware()) ?
           Config.shared_transient_entries_limit : 0;
}

bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map);
    return map->markedForDeletion(key);
}

bool
Transients::isReader(const StoreEntry &e) const
{
    return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading;
}

bool
Transients::isWriter(const StoreEntry &e) const
{
    return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting;
}

/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner = nullptr;
};

RunnerRegistrationEntry(TransientsRr);

void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
TransientsRr::create()
{
    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner);
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
}

TransientsRr::~TransientsRr()
{
    delete mapOwner;
}