]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 1996-2023 The Squid Software Foundation and contributors | |
3 | * | |
4 | * Squid software is distributed under GPLv2+ license and includes | |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
7 | */ | |
8 | ||
9 | /* DEBUG: section 20 Storage Manager */ | |
10 | ||
11 | #include "squid.h" | |
12 | #include "base/RunnersRegistry.h" | |
13 | #include "CollapsedForwarding.h" | |
14 | #include "HttpReply.h" | |
15 | #include "ipc/mem/Page.h" | |
16 | #include "ipc/mem/Pages.h" | |
17 | #include "MemObject.h" | |
18 | #include "mime_header.h" | |
19 | #include "SquidConfig.h" | |
20 | #include "SquidMath.h" | |
21 | #include "StoreStats.h" | |
22 | #include "tools.h" | |
23 | #include "Transients.h" | |
24 | ||
25 | #include <limits> | |
26 | ||
/// shared memory segment path to use for the Transients map;
/// the segment itself is created by TransientsRr (below)
static const SBuf MapLabel("transients_map");
29 | ||
/// both map and locals stay nil until init() allocates them
Transients::Transients(): map(nullptr), locals(nullptr)
{
}
33 | ||
Transients::~Transients()
{
    // release the shared map wrapper and the local entries index;
    // the underlying shared memory segment is owned by TransientsRr
    delete map;
    delete locals;
}
39 | ||
/// attaches to the shared Transients map and allocates the local entries index
void
Transients::init()
{
    assert(Enabled());
    const int64_t entryLimit = EntryLimit();
    assert(entryLimit > 0);

    Must(!map); // init() must not be called twice
    map = new TransientsMap(MapLabel);
    map->cleaner = this;
    map->disableHitValidation(); // Transients lacks slices to validate

    // one (initially nil) local StoreEntry pointer per possible map slot
    locals = new Locals(entryLimit, nullptr);
}
54 | ||
/// Store API: fills stats with shared memory page usage (when supported)
void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#else
    (void)stats; // this build collects no Transients stats
#endif
}
71 | ||
72 | void | |
73 | Transients::stat(StoreEntry &e) const | |
74 | { | |
75 | storeAppendPrintf(&e, "\n\nTransient Objects\n"); | |
76 | ||
77 | storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0); | |
78 | storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n", | |
79 | currentSize() / 1024.0, | |
80 | Math::doublePercent(currentSize(), maxSize())); | |
81 | ||
82 | if (map) { | |
83 | const int limit = map->entryLimit(); | |
84 | storeAppendPrintf(&e, "Maximum entries: %9d\n", limit); | |
85 | if (limit > 0) { | |
86 | storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n", | |
87 | currentCount(), (100.0 * currentCount() / limit)); | |
88 | } | |
89 | } | |
90 | } | |
91 | ||
/// Store API: periodic maintenance hook
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
97 | ||
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
103 | ||
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
110 | ||
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
118 | ||
119 | uint64_t | |
120 | Transients::currentCount() const | |
121 | { | |
122 | return map ? map->entryCount() : 0; | |
123 | } | |
124 | ||
125 | int64_t | |
126 | Transients::maxObjectSize() const | |
127 | { | |
128 | // Squid currently does not limit the size of a transient object | |
129 | return std::numeric_limits<uint64_t>::max(); | |
130 | } | |
131 | ||
/// Store API: called when the entry is used; nothing to track here
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
137 | ||
/// Store API: returns false to let store_table drop its reference
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
144 | ||
/// Store API: returns a new StoreEntry anchored to the shared map entry for
/// the given key, or nil (e.g., when there is no such shared entry or when a
/// conflicting private local entry exists for the same slot)
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return nullptr;

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return nullptr;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
        map->closeForReadingAndFreeIdle(index); // release our failed read lock
        return nullptr;
    }

    StoreEntry *e = new StoreEntry();
    e->createMemObject();
    anchorEntry(*e, index, *anchor);

    // keep read lock to receive updates from others
    return e;
}
174 | ||
175 | StoreEntry * | |
176 | Transients::findCollapsed(const sfileno index) | |
177 | { | |
178 | if (!map) | |
179 | return nullptr; | |
180 | ||
181 | if (StoreEntry *oldE = locals->at(index)) { | |
182 | debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel); | |
183 | assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index); | |
184 | return oldE; | |
185 | } | |
186 | ||
187 | debugs(20, 3, "no entry at " << index << " in " << MapLabel); | |
188 | return nullptr; | |
189 | } | |
190 | ||
/// ensures e is registered both in the shared map (via addEntry()) and in the
/// local entries index, so cross-worker update notifications can reach it
void
Transients::monitorIo(StoreEntry *e, const cache_key *key, const Store::IoStatus direction)
{
    if (!e->hasTransients()) {
        addEntry(e, key, direction);
        assert(e->hasTransients());
    }

    const auto index = e->mem_obj->xitTable.index;
    if (const auto old = locals->at(index)) {
        assert(old == e); // a slot cannot be claimed by two local entries
    } else {
        // We do not lock e because we do not want to prevent its destruction;
        // e is tied to us via mem_obj so we will know when it is destructed.
        locals->at(index) = e;
    }
}
208 | ||
209 | /// creates a new Transients entry | |
210 | void | |
211 | Transients::addEntry(StoreEntry *e, const cache_key *key, const Store::IoStatus direction) | |
212 | { | |
213 | assert(e); | |
214 | assert(e->mem_obj); | |
215 | assert(!e->hasTransients()); | |
216 | ||
217 | Must(map); // configured to track transients | |
218 | ||
219 | if (direction == Store::ioWriting) | |
220 | return addWriterEntry(*e, key); | |
221 | ||
222 | assert(direction == Store::ioReading); | |
223 | addReaderEntry(*e, key); | |
224 | } | |
225 | ||
/// addEntry() helper used for cache entry creators/writers
/// \throws TextException when another worker is already writing this entry
void
Transients::addWriterEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openForWriting(key, index);
    if (!anchor)
        throw TextException("writer collision", Here());

    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    auto &xitTable = e.mem_obj->xitTable;
    xitTable.index = index;
    xitTable.io = Store::ioWriting;

    anchor->set(e, key);
    // allow reading and receive remote DELETE events, but do not switch to
    // the reading lock because transientReaders() callers want true readers
    map->startAppending(index);
}
246 | ||
/// addEntry() helper used for cache readers
/// readers do not modify the cache, but they must create a Transients entry
/// \throws TextException when a conflicting entry prevents reader creation
void
Transients::addReaderEntry(StoreEntry &e, const cache_key *key)
{
    sfileno index = 0;
    const auto anchor = map->openOrCreateForReading(key, index, e);
    if (!anchor)
        throw TextException("reader collision", Here());

    anchorEntry(e, index, *anchor);
    // keep the entry locked (for reading) to receive remote DELETE events
}
260 | ||
/// fills (recently created) StoreEntry with information currently in Transients
/// \param index the shared map slot the entry is being attached to
/// \param anchor the already-locked shared map anchor to import state from
void
Transients::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    // set ASAP in hope to unlock the slot if something throws
    // and to provide index to such methods as hasWriter()
    auto &xitTable = e.mem_obj->xitTable;
    xitTable.index = index;
    xitTable.io = Store::ioReading;

    anchor.exportInto(e);
}
273 | ||
274 | bool | |
275 | Transients::hasWriter(const StoreEntry &e) | |
276 | { | |
277 | if (!e.hasTransients()) | |
278 | return false; | |
279 | return map->peekAtWriter(e.mem_obj->xitTable.index); | |
280 | } | |
281 | ||
/// Ipc::StoreMapCleaner API: called when the map frees a slice
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
287 | ||
/// copies the current shared-entry state (writer presence, pending deletion)
/// into entryStatus
void
Transients::status(const StoreEntry &entry, Transients::EntryStatus &entryStatus) const
{
    assert(map);
    assert(entry.hasTransients());
    const auto idx = entry.mem_obj->xitTable.index;
    // writers and readers hold different kinds of map locks
    const auto &anchor = isWriter(entry) ?
                         map->writeableEntry(idx) : map->readableEntry(idx);
    entryStatus.hasWriter = anchor.writing();
    entryStatus.waitingToBeFreed = anchor.waitingToBeFreed;
}
299 | ||
/// demotes the local writer to a reader (so it keeps receiving remote events)
/// and notifies other workers about the change
void
Transients::completeWriting(const StoreEntry &e)
{
    debugs(20, 5, e);
    assert(e.hasTransients());
    assert(isWriter(e));
    map->switchWritingToReading(e.mem_obj->xitTable.index);
    e.mem_obj->xitTable.io = Store::ioReading;
    CollapsedForwarding::Broadcast(e);
}
310 | ||
311 | int | |
312 | Transients::readers(const StoreEntry &e) const | |
313 | { | |
314 | if (e.hasTransients()) { | |
315 | assert(map); | |
316 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
317 | } | |
318 | return 0; | |
319 | } | |
320 | ||
/// Store API: frees the shared map entry of e (if any) and broadcasts the
/// change so other workers can react
void
Transients::evictCached(StoreEntry &e)
{
    debugs(20, 5, e);
    if (e.hasTransients()) {
        const auto index = e.mem_obj->xitTable.index;
        if (map->freeEntry(index)) {
            // Delay syncCollapsed(index) which may end `e` wait for updates.
            // Calling it directly/here creates complex reentrant call chains.
            CollapsedForwarding::Broadcast(e, true);
        }
    } // else nothing to do because e must be private
}
334 | ||
/// Store API: frees the shared map entry matching the given key (if any)
/// and broadcasts the change to other workers
void
Transients::evictIfFound(const cache_key *key)
{
    if (!map)
        return;

    const sfileno index = map->fileNoByKey(key);
    if (map->freeEntry(index))
        CollapsedForwarding::Broadcast(index, true);
}
345 | ||
/// breaks the association between the local entry and its Transients slot:
/// releases our map lock and the locals[] registration for that slot
void
Transients::disconnect(StoreEntry &entry)
{
    debugs(20, 5, entry);
    if (entry.hasTransients()) {
        auto &xitTable = entry.mem_obj->xitTable;
        assert(map);
        if (isWriter(entry)) {
            // completeWriting() was not called, so there could be an active
            // Store writer out there, but we should not abortWriting() here
            // because another writer may have succeeded, making readers happy.
            // If none succeeded, the readers will notice the lack of writers.
            map->closeForWriting(xitTable.index);
            CollapsedForwarding::Broadcast(entry);
        } else {
            assert(isReader(entry));
            map->closeForReadingAndFreeIdle(xitTable.index);
        }
        locals->at(xitTable.index) = nullptr;
        // forget the now-released slot
        xitTable.index = -1;
        xitTable.io = Store::ioDone;
    }
}
369 | ||
370 | /// calculates maximum number of entries we need to store and map | |
371 | int64_t | |
372 | Transients::EntryLimit() | |
373 | { | |
374 | return (UsingSmp() && Store::Controller::SmpAware()) ? | |
375 | Config.shared_transient_entries_limit : 0; | |
376 | } | |
377 | ||
/// whether the shared map entry matching the given key is marked for deletion
bool
Transients::markedForDeletion(const cache_key *key) const
{
    assert(map);
    return map->markedForDeletion(key);
}
384 | ||
385 | bool | |
386 | Transients::isReader(const StoreEntry &e) const | |
387 | { | |
388 | return e.mem_obj && e.mem_obj->xitTable.io == Store::ioReading; | |
389 | } | |
390 | ||
391 | bool | |
392 | Transients::isWriter(const StoreEntry &e) const | |
393 | { | |
394 | return e.mem_obj && e.mem_obj->xitTable.io == Store::ioWriting; | |
395 | } | |
396 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    void useConfig() override;
    ~TransientsRr() override;

protected:
    void create() override;

private:
    /// owner of the shared memory segment backing the Transients map
    TransientsMap::Owner *mapOwner = nullptr;
};

RunnerRegistrationEntry(TransientsRr);
413 | ||
/// RegisteredRunner API: validates shared-memory configuration before
/// delegating to the base class
void
TransientsRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
420 | ||
421 | void | |
422 | TransientsRr::create() | |
423 | { | |
424 | const int64_t entryLimit = Transients::EntryLimit(); | |
425 | if (entryLimit <= 0) | |
426 | return; // no SMP configured or a misconfiguration | |
427 | ||
428 | Must(!mapOwner); | |
429 | mapOwner = TransientsMap::Init(MapLabel, entryLimit); | |
430 | } | |
431 | ||
TransientsRr::~TransientsRr()
{
    // destroys the shared memory segment owner created by create()
    delete mapOwner;
}
436 |