git.ipfire.org Git - thirdparty/squid.git/blob - src/Transients.cc
paranoid_hit_validation directive (#559)
[thirdparty/squid.git] / src / Transients.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 20 Storage Manager */
10
11 #include "squid.h"
12 #include "base/RunnersRegistry.h"
13 #include "CollapsedForwarding.h"
14 #include "HttpReply.h"
15 #include "ipc/mem/Page.h"
16 #include "ipc/mem/Pages.h"
17 #include "MemObject.h"
18 #include "mime_header.h"
19 #include "SquidConfig.h"
20 #include "SquidMath.h"
21 #include "StoreStats.h"
22 #include "tools.h"
23 #include "Transients.h"
24
25 #include <limits>
26
/// shared memory segment path to use for Transients map;
/// must match the label used by TransientsRr when creating the segment
static const SBuf MapLabel("transients_map");
29
30 Transients::Transients(): map(NULL), locals(NULL)
31 {
32 }
33
/// Frees the map wrapper and the local entries index created by init().
/// The underlying shared memory segment is owned elsewhere (see TransientsRr).
Transients::~Transients()
{
    delete map;
    delete locals;
}
39
/// Attaches to the shared transients map segment and allocates the local
/// per-slot StoreEntry index. Must be called at most once, and only when
/// transients support is enabled.
void
Transients::init()
{
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map);
    map = new TransientsMap(MapLabel);
    map->cleaner = this; // we handle noteFreeMapSlice() callbacks
    map->disableHitValidation(); // Transients lacks slices to validate

    // one local StoreEntry pointer slot per possible map entry, all nil
    locals = new Locals(entryLimit, 0);
}
54
55 void
56 Transients::getStats(StoreInfoStats &stats) const
57 {
58 #if TRANSIENT_STATS_SUPPORTED
59 const size_t pageSize = Ipc::Mem::PageSize();
60
61 stats.mem.shared = true;
62 stats.mem.capacity =
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
64 stats.mem.size =
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
66 stats.mem.count = currentCount();
67 #endif
68 }
69
70 void
71 Transients::stat(StoreEntry &e) const
72 {
73 storeAppendPrintf(&e, "\n\nTransient Objects\n");
74
75 storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
76 storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
77 currentSize() / 1024.0,
78 Math::doublePercent(currentSize(), maxSize()));
79
80 if (map) {
81 const int limit = map->entryLimit();
82 storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
83 if (limit > 0) {
84 storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
85 currentCount(), (100.0 * currentCount() / limit));
86 }
87 }
88 }
89
/// Store API: periodic maintenance hook; a no-op for Transients because
/// entries are freed explicitly rather than lazily garbage-collected.
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
95
/// Store API: minimum size of this store; always zero for Transients.
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
101
/// Store API: capacity of this store; effectively unlimited for Transients.
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
108
/// Store API: total size of currently stored transient objects.
/// Always zero for now; see the TODO below.
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
116
117 uint64_t
118 Transients::currentCount() const
119 {
120 return map ? map->entryCount() : 0;
121 }
122
123 int64_t
124 Transients::maxObjectSize() const
125 {
126 // Squid currently does not limit the size of a transient object
127 return std::numeric_limits<uint64_t>::max();
128 }
129
/// Store API: entry-access notification; a no-op because Transients does not
/// evict entries based on usage.
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
135
/// Store API: asked when an idle entry may leave the global store_table.
/// \returns false because Transients never needs store_table to keep entries
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
142
143 StoreEntry *
144 Transients::get(const cache_key *key)
145 {
146 if (!map)
147 return NULL;
148
149 sfileno index;
150 const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
151 if (!anchor)
152 return NULL;
153
154 // If we already have a local entry, the store_table should have found it.
155 // Since it did not, the local entry key must have changed from public to
156 // private. We still need to keep the private entry around for syncing as
157 // its clients depend on it, but we should not allow new clients to join.
158 if (StoreEntry *oldE = locals->at(index)) {
159 debugs(20, 3, "not joining private " << *oldE);
160 assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
161 map->closeForReadingAndFreeIdle(index);
162 return nullptr;
163 }
164
165 StoreEntry *e = new StoreEntry();
166 e->createMemObject();
167 e->mem_obj->xitTable.index = index;
168 e->mem_obj->xitTable.io = Store::ioReading;
169 anchor->exportInto(*e);
170 const bool collapsingRequired = EBIT_TEST(anchor->basics.flags, ENTRY_REQUIRES_COLLAPSING);
171 e->setCollapsingRequirement(collapsingRequired);
172 // keep read lock to receive updates from others
173 return e;
174 }
175
176 StoreEntry *
177 Transients::findCollapsed(const sfileno index)
178 {
179 if (!map)
180 return NULL;
181
182 if (StoreEntry *oldE = locals->at(index)) {
183 debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
184 assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
185 return oldE;
186 }
187
188 debugs(20, 3, "no entry at " << index << " in " << MapLabel);
189 return NULL;
190 }
191
/// Clears the shared ENTRY_REQUIRES_COLLAPSING flag for a transients entry
/// that this (writing) process owns, notifying other workers of the change.
void
Transients::clearCollapsingRequirement(const StoreEntry &e)
{
    assert(map);
    assert(e.hasTransients());
    assert(isWriter(e)); // only the writer may modify the shared anchor
    const auto idx = e.mem_obj->xitTable.index;
    auto &anchor = map->writeableEntry(idx);
    if (EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING)) {
        EBIT_CLR(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
        CollapsedForwarding::Broadcast(e); // let readers re-check the flag
    }
}
205
/// Starts monitoring the given entry for shared-memory events, creating its
/// transients map entry if needed and registering it in the local index.
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients()); // addEntry() must have attached the entry
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e); // at most one local entry per transients slot
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}
223
/// creates a new Transients entry
/// \param e the local entry to attach; must not already have a transients slot
/// \param key the public key used to index the shared map entry
/// \param direction whether e will write (ioWriting) or read (ioReading)
void
Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    assert(e);
    assert(e->mem_obj);
    assert(!e->hasTransients());

    Must(map); // configured to track transients

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(key, index);
    Must(slot); // no writer collisions

    // set ASAP in hope to unlock the slot if something throws
    e->mem_obj->xitTable.index = index;
    e->mem_obj->xitTable.io = Store::ioWriting;

    slot->set(*e, key); // publish basic entry metadata to the shared anchor
    if (direction == Store::ioWriting) {
        // allow reading and receive remote DELETE events, but do not switch to
        // the reading lock because transientReaders() callers want true readers
        map->startAppending(index);
    } else {
        assert(direction == Store::ioReading);
        // keep the entry locked (for reading) to receive remote DELETE events
        map->switchWritingToReading(index);
        e->mem_obj->xitTable.io = Store::ioReading;
    }
}
254
255 bool
256 Transients::hasWriter(const StoreEntry &e)
257 {
258 if (!e.hasTransients())
259 return false;
260 return map->peekAtWriter(e.mem_obj->xitTable.index);
261 }
262
/// Ipc::StoreMapCleaner API: called when the map frees a slice; a no-op here.
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
268
/// Copies the current shared-anchor state of the given transients entry into
/// entryStatus (writer aborted, pending deletion, collapsing required).
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    // pick the accessor matching our lock type for this slot
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.abortedByWriter = anchor.writerHalted;
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
    entryStatus.collapsed = EBIT_TEST(anchor.basics.flags, ENTRY_REQUIRES_COLLAPSING);
}
281
/// Marks the end of writing for the given entry: the shared map lock is
/// downgraded from writing to reading, and the local io state follows suit.
void
Transients::completeWriting(const StoreEntry &e)
{
    assert(e.hasTransients());
    assert(isWriter(e)); // only the current writer may complete writing
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    e.mem_obj->xitTable.io = Store::ioReading;
}
290
291 int
292 Transients::readers(const StoreEntry &e) const
293 {
294 if (e.hasTransients()) {
295 assert(map);
296 return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers;
297 }
298 return 0;
299 }
300
/// Store API: marks the entry's transients slot for deletion and notifies
/// other workers (if the slot could be freed).
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}
314
/// Store API: marks for deletion any transients slot matching the given key
/// and notifies other workers (if the slot could be freed).
void
Transients::evictIfFound(const cache_key *key)
{
    if (!map)
        return; // transients not enabled/initialized

    const sfileno index = map->fileNoByKey(key);
    if (map->freeEntry(index))
        CollapsedForwarding::Broadcast(index, true);
}
325
/// Detaches the local entry from its transients slot: releases the shared map
/// lock (aborting if we were the writer), clears our local index slot, and
/// resets the entry's transit state.
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            map->abortWriting(xitTable.index); // tell readers the writer quit
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr; // forget the local registration
        xitTable.index = -1; // no longer attached to any slot
        xitTable.io = Store::ioDone;
    }
}
344
345 /// calculates maximum number of entries we need to store and map
346 int64_t
347 Transients::EntryLimit()
348 {
349 return (UsingSmp() && Store::Controller::SmpAware()) ?
350 Config.shared_transient_entries_limit : 0;
351 }
352
/// Whether the shared map entry matching the given key is marked for deletion.
bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map);
    return map->markedForDeletion(key);
}
359
360 bool
361 Transients::isReader(const StoreEntry &e) const
362 {
363 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading;
364 }
365
366 bool
367 Transients::isWriter(const StoreEntry &e) const
368 {
369 return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting;
370 }
371
372 /// initializes shared memory segment used by Transients
373 class TransientsRr: public Ipc::Mem::RegisteredRunner
374 {
375 public:
376 /* RegisteredRunner API */
377 virtual void useConfig();
378 virtual ~TransientsRr();
379
380 protected:
381 virtual void create();
382
383 private:
384 TransientsMap::Owner *mapOwner = nullptr;
385 };
386
// register TransientsRr so it runs during Squid startup/configuration
RunnerRegistrationEntry(TransientsRr);
388
/// RegisteredRunner API: reacts to the parsed configuration; requires shared
/// memory to be configured before delegating to the base implementation.
void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
395
/// RegisteredRunner API: creates the shared transients map segment
/// (in the process responsible for segment creation), sized by EntryLimit().
void
TransientsRr::create()
{
    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner); // create() must not run twice
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
}
406
/// Releases ownership of the shared map segment created by create() (if any).
TransientsRr::~TransientsRr()
{
    delete mapOwner;
}
411