]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 1996-2023 The Squid Software Foundation and contributors | |
3 | * | |
4 | * Squid software is distributed under GPLv2+ license and includes | |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
7 | */ | |
8 | ||
9 | /* DEBUG: section 20 Storage Manager */ | |
10 | ||
11 | #include "squid.h" | |
12 | #include "base/RunnersRegistry.h" | |
13 | #include "CollapsedForwarding.h" | |
14 | #include "HttpReply.h" | |
15 | #include "ipc/mem/Page.h" | |
16 | #include "ipc/mem/Pages.h" | |
17 | #include "MemObject.h" | |
18 | #include "mime_header.h" | |
19 | #include "SquidConfig.h" | |
20 | #include "SquidMath.h" | |
21 | #include "StoreStats.h" | |
22 | #include "tools.h" | |
23 | #include "Transients.h" | |
24 | ||
25 | #include <limits> | |
26 | ||
27 | /// shared memory segment path to use for Transients map | |
28 | static const auto & | |
29 | MapLabel() | |
30 | { | |
31 | static const auto label = new SBuf("transients_map"); | |
32 | return *label; | |
33 | } | |
34 | ||
35 | Transients::Transients(): map(nullptr), locals(nullptr) | |
36 | { | |
37 | } | |
38 | ||
Transients::~Transients()
{
    delete map;    // map wrapper only; the shared segment is owned by TransientsRr
    delete locals; // holds non-owning StoreEntry pointers; entries are not destroyed
}
44 | ||
void
Transients::init()
{
    // may only be called when transients tracking is enabled and configured
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map); // we must not be initialized twice
    map = new TransientsMap(MapLabel());
    map->cleaner = this;
    map->disableHitValidation(); // Transients lacks slices to validate

    // one (initially nil) local StoreEntry pointer per map slot; see monitorIo()
    locals = new Locals(entryLimit, nullptr);
}
59 | ||
void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    // capacity/size are reported in bytes, derived from shared cache pages
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#else
    (void)stats; // stats collection disabled at build time
#endif
}
76 | ||
void
Transients::stat(StoreEntry &e) const
{
    // appends a human-readable report to the given cache manager entry
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) { // avoid division by zero below
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}
96 | ||
97 | void | |
98 | Transients::maintain() | |
99 | { | |
100 | // no lazy garbage collection needed | |
101 | } | |
102 | ||
/// Store API: Transients imposes no minimum size
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
108 | ||
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    // so report the largest representable value as an "unlimited" sentinel
    return std::numeric_limits<uint64_t>::max();
}
115 | ||
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
123 | ||
124 | uint64_t | |
125 | Transients::currentCount() const | |
126 | { | |
127 | return map ? map->entryCount() : 0; | |
128 | } | |
129 | ||
130 | int64_t | |
131 | Transients::maxObjectSize() const | |
132 | { | |
133 | // Squid currently does not limit the size of a transient object | |
134 | return std::numeric_limits<uint64_t>::max(); | |
135 | } | |
136 | ||
137 | void | |
138 | Transients::reference(StoreEntry &) | |
139 | { | |
140 | // no replacement policy (but the cache(s) storing the entry may have one) | |
141 | } | |
142 | ||
143 | bool | |
144 | Transients::dereference(StoreEntry &) | |
145 | { | |
146 | // no need to keep e in the global store_table for us; we have our own map | |
147 | return false; | |
148 | } | |
149 | ||
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return nullptr; // transients tracking is not configured

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return nullptr; // no shared entry for this key

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
        map->closeForReadingAndFreeIdle(index); // release the lock taken above
        return nullptr;
    }

    // create a fresh local entry attached to the shared slot
    StoreEntry *e = new StoreEntry();
    e->createMemObject();
    e->mem_obj->xitTable.open(index, Store::ioReading);

    // keep read lock to receive updates from others
    return e;
}
179 | ||
180 | StoreEntry * | |
181 | Transients::findCollapsed(const sfileno index) | |
182 | { | |
183 | if (!map) | |
184 | return nullptr; | |
185 | ||
186 | if (StoreEntry *oldE = locals->at(index)) { | |
187 | debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel()); | |
188 | assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index); | |
189 | return oldE; | |
190 | } | |
191 | ||
192 | debugs(20, 3, "no entry at " << index << " in " << MapLabel()); | |
193 | return nullptr; | |
194 | } | |
195 | ||
/// attaches the entry to a transients slot and records it in our locals table
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    // give the entry a transients slot if it does not have one yet
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients());
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e); // a slot may only be associated with this very entry
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}
213 | ||
214 | /// creates a new Transients entry | |
215 | void | |
216 | Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction) | |
217 | { | |
218 | assert(e); | |
219 | assert(e->mem_obj); | |
220 | assert(!e->hasTransients()); | |
221 | ||
222 | Must(map); // configured to track transients | |
223 | ||
224 | if (direction == Store::ioWriting) | |
225 | return addWriterEntry(*e, key); | |
226 | ||
227 | assert(direction == Store::ioReading); | |
228 | addReaderEntry(*e, key); | |
229 | } | |
230 | ||
/// addEntry() helper used for cache entry creators/writers
void
Transients::addWriterEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openForWriting(key, index);
    if (!anchor)
        throw TextException("writer collision", Here()); // another worker is writing

    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    e.mem_obj->xitTable.open(index, Store::ioWriting);

    anchor->setKey(key);
    // allow reading and receive remote DELETE events, but do not switch to
    // the reading lock because transientReaders() callers want true readers
    map->startAppending(index);
}
249 | ||
/// addEntry() helper used for cache readers
/// readers do not modify the cache, but they must create a Transients entry
void
Transients::addReaderEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openOrCreateForReading(key, index);
    if (!anchor)
        throw TextException("reader collision", Here()); // slot unavailable

    e.mem_obj->xitTable.open(index, Store::ioReading);
    // keep the entry locked (for reading) to receive remote DELETE events
}
263 | ||
264 | bool | |
265 | Transients::hasWriter(const StoreEntry &e) | |
266 | { | |
267 | if (!e.hasTransients()) | |
268 | return false; | |
269 | return map->peekAtWriter(e.mem_obj->xitTable.index); | |
270 | } | |
271 | ||
/// StoreMapCleaner API: called when the map frees a slice
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
277 | ||
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    // pick the anchor accessor matching our lock kind (writer vs. reader)
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.hasWriter = anchor.writing();
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
}
289 | ||
void
Transients::completeWriting(const StoreEntry &e)
{
    debugs(20, 5, e);
    assert(e.hasTransients());
    assert(isWriter(e));
    // convert our writing lock into a reading one so we keep the slot open
    // while signaling that this worker has finished writing
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    e.mem_obj->xitTable.io = Store::ioReading;
    CollapsedForwarding::Broadcast(e); // notify other workers of the change
}
300 | ||
301 | int | |
302 | Transients::readers(const StoreEntry &e) const | |
303 | { | |
304 | if (e.hasTransients()) { | |
305 | assert(map); | |
306 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
307 | } | |
308 | return 0; | |
309 | } | |
310 | ||
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}
324 | ||
325 | void | |
326 | Transients::evictIfFound(const cache_key *key) | |
327 | { | |
328 | if (!map) | |
329 | return; | |
330 | ||
331 | const sfileno index = map->fileNoByKey(key); | |
332 | if (map->freeEntry(index)) | |
333 | CollapsedForwarding::Broadcast(index, true); | |
334 | } | |
335 | ||
/// detaches the entry from its transients slot, releasing our lock on it
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            // completeWriting() was not called, so there could be an active
            // Store writer out there, but we should not abortWriting() here
            // because another writer may have succeeded, making readers happy.
            // If none succeeded, the readers will notice the lack of writers.
            map->closeForWriting(xitTable.index);
            CollapsedForwarding::Broadcast(entry);
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr; // forget the local association
        xitTable.close();
    }
}
358 | ||
359 | /// calculates maximum number of entries we need to store and map | |
360 | int64_t | |
361 | Transients::EntryLimit() | |
362 | { | |
363 | return (UsingSmp() && Store::Controller::SmpAware()) ? | |
364 | Config.shared_transient_entries_limit : 0; | |
365 | } | |
366 | ||
/// whether the shared slot (if any) for this key is marked for deletion
bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map);
    return map->markedForDeletion(key);
}
373 | ||
374 | bool | |
375 | Transients::isReader(const StoreEntry &e) const | |
376 | { | |
377 | return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading; | |
378 | } | |
379 | ||
380 | bool | |
381 | Transients::isWriter(const StoreEntry &e) const | |
382 | { | |
383 | return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting; | |
384 | } | |
385 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    void useConfig() override;
    ~TransientsRr() override;

protected:
    void create() override;

private:
    TransientsMap::Owner *mapOwner = nullptr; ///< owns the shared map segment
};
400 | ||
// registers TransientsRr so that it runs during startup
DefineRunnerRegistrator(TransientsRr);
402 | ||
void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured()); // shared memory decision must be final
    Ipc::Mem::RegisteredRunner::useConfig();
}
409 | ||
410 | void | |
411 | TransientsRr::create() | |
412 | { | |
413 | const int64_t entryLimit = Transients::EntryLimit(); | |
414 | if (entryLimit <= 0) | |
415 | return; // no SMP configured or a misconfiguration | |
416 | ||
417 | Must(!mapOwner); | |
418 | mapOwner = TransientsMap::Init(MapLabel(), entryLimit); | |
419 | } | |
420 | ||
TransientsRr::~TransientsRr()
{
    delete mapOwner; // releases the shared map segment ownership
}
425 |