/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */
8 | ||
bbc27441 AJ |
9 | /* DEBUG: section 20 Storage Manager */ |
10 | ||
9a9954ba AR |
11 | #include "squid.h" |
12 | #include "base/RunnersRegistry.h" | |
e4d13993 | 13 | #include "CollapsedForwarding.h" |
9a9954ba AR |
14 | #include "HttpReply.h" |
15 | #include "ipc/mem/Page.h" | |
16 | #include "ipc/mem/Pages.h" | |
17 | #include "MemObject.h" | |
9a9954ba AR |
18 | #include "mime_header.h" |
19 | #include "SquidConfig.h" | |
20 | #include "SquidMath.h" | |
21 | #include "StoreStats.h" | |
22 | #include "tools.h" | |
e4d13993 | 23 | #include "Transients.h" |
9a9954ba | 24 | |
9a9954ba | 25 | #include <limits> |
9a9954ba | 26 | |
1860fbac AR |
27 | /// shared memory segment path to use for Transients map |
28 | static const SBuf MapLabel("transients_map"); | |
29 | /// shared memory segment path to use for Transients map extras | |
30 | static const char *ExtrasLabel = "transients_ex"; | |
9a9954ba | 31 | |
/// Constructs a disabled Transients store; init() enables it by allocating
/// the shared map and the locals index when SMP collapsed forwarding is on.
Transients::Transients(): map(NULL), locals(NULL)
{
}
35 | ||
Transients::~Transients()
{
    // locals stores non-owning StoreEntry pointers, so deleting the
    // container does not destroy the entries themselves
    delete map;
    delete locals;
}
41 | ||
42 | void | |
43 | Transients::init() | |
44 | { | |
45 | const int64_t entryLimit = EntryLimit(); | |
46 | if (entryLimit <= 0) | |
47 | return; // no SMP support or a misconfiguration | |
48 | ||
49 | Must(!map); | |
50 | map = new TransientsMap(MapLabel); | |
51 | map->cleaner = this; | |
6919be24 | 52 | |
1860fbac AR |
53 | extras = shm_old(TransientsMapExtras)(ExtrasLabel); |
54 | ||
8bcca0f8 | 55 | locals = new Locals(entryLimit, 0); |
9a9954ba AR |
56 | } |
57 | ||
58 | void | |
59 | Transients::getStats(StoreInfoStats &stats) const | |
60 | { | |
61 | #if TRANSIENT_STATS_SUPPORTED | |
62 | const size_t pageSize = Ipc::Mem::PageSize(); | |
63 | ||
64 | stats.mem.shared = true; | |
65 | stats.mem.capacity = | |
66 | Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize; | |
67 | stats.mem.size = | |
68 | Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize; | |
69 | stats.mem.count = currentCount(); | |
70 | #endif | |
71 | } | |
72 | ||
73 | void | |
74 | Transients::stat(StoreEntry &e) const | |
75 | { | |
76 | storeAppendPrintf(&e, "\n\nTransient Objects\n"); | |
77 | ||
78 | storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0); | |
79 | storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n", | |
80 | currentSize() / 1024.0, | |
81 | Math::doublePercent(currentSize(), maxSize())); | |
82 | ||
83 | if (map) { | |
84 | const int limit = map->entryLimit(); | |
85 | storeAppendPrintf(&e, "Maximum entries: %9d\n", limit); | |
86 | if (limit > 0) { | |
87 | storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n", | |
88 | currentCount(), (100.0 * currentCount() / limit)); | |
89 | } | |
90 | } | |
91 | } | |
92 | ||
/// Store API: periodic maintenance hook; intentionally empty here.
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
98 | ||
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
104 | ||
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
111 | ||
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
119 | ||
120 | uint64_t | |
121 | Transients::currentCount() const | |
122 | { | |
123 | return map ? map->entryCount() : 0; | |
124 | } | |
125 | ||
126 | int64_t | |
127 | Transients::maxObjectSize() const | |
128 | { | |
129 | // Squid currently does not limit the size of a transient object | |
130 | return std::numeric_limits<uint64_t>::max(); | |
131 | } | |
132 | ||
/// Store API: entry-access notification; intentionally empty here.
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
138 | ||
/// Store API: returns whether this store wants the entry kept in store_table.
bool
Transients::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
145 | ||
/// Returns a StoreEntry the caller may collapse onto, or NULL. On success,
/// the shared map entry remains read-locked so that we keep receiving
/// updates from the writer until disconnect() releases the lock.
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL; // Transients is disabled

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL; // no readable shared entry for this key

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index);
    return NULL;
}
172 | ||
/// Builds a local StoreEntry from the shared-memory extras stored at the
/// given map index and registers it in locals. The caller holds (and keeps)
/// the read lock on the corresponding map entry.
StoreEntry *
Transients::copyFromShm(const sfileno index)
{
    const TransientsMapExtras::Item &extra = extras->items[index];

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
                                         extra.reqFlags, extra.reqMethod);

    assert(e->mem_obj);
    e->mem_obj->method = extra.reqMethod;
    e->mem_obj->xitTable.io = MemObject::ioReading;
    e->mem_obj->xitTable.index = index;

    // TODO: Support collapsed revalidation for SMP-aware caches.
    e->setPublicKey(ksDefault);
    assert(e->key);

    // How do we know its SMP- and not just locally-collapsed? A worker gets
    // locally-collapsed entries from the local store_table, not Transients.
    // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
    e->mem_obj->smpCollapsed = true;

    assert(!locals->at(index));
    // We do not lock e because we do not want to prevent its destruction;
    // e is tied to us via mem_obj so we will know when it is destructed.
    locals->at(index) = e;
    return e;
}
202 | ||
6919be24 AR |
203 | StoreEntry * |
204 | Transients::findCollapsed(const sfileno index) | |
205 | { | |
206 | if (!map) | |
207 | return NULL; | |
208 | ||
209 | if (StoreEntry *oldE = locals->at(index)) { | |
210 | debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel); | |
211 | assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index); | |
212 | return oldE; | |
213 | } | |
214 | ||
215 | debugs(20, 3, "no entry at " << index << " in " << MapLabel); | |
216 | return NULL; | |
217 | } | |
218 | ||
/// Registers the entry being cached as in-transit so that other workers can
/// find it and collapse onto it. On success, keeps the shared map entry
/// write-locked until completeWriting() or disconnect(). Failures are not
/// fatal: the entry is simply not shared via Transients.
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0); // not already registered with us

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return;
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index);
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortWriting(index);
}
257 | ||
/// copies all relevant local data to shared memory
bool
Transients::copyToShm(const StoreEntry &e, const sfileno index,
                      const RequestFlags &reqFlags,
                      const HttpRequestMethod &reqMethod)
{
    TransientsMapExtras::Item &extra = extras->items[index];

    const char *url = e.url();
    const size_t urlLen = strlen(url);
    Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
    strncpy(extra.url, url, sizeof(extra.url));
    extra.url[urlLen] = '\0';

    extra.reqFlags = reqFlags;

    // NOTE(review): presumably METHOD_OTHER cannot be reconstructed from its
    // id by readers, hence the refusal to share such entries -- TODO confirm
    Must(reqMethod != Http::METHOD_OTHER);
    extra.reqMethod = reqMethod.id();

    return true;
}
279 | ||
/// Ipc::StoreMapCleaner API: called when the map frees a slice.
void
Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
285 | ||
/// Marks the in-transit entry as freed in the shared map and notifies other
/// workers so that their collapsed readers learn about the abandonment.
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e);
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
296 | ||
297 | bool | |
298 | Transients::abandoned(const StoreEntry &e) const | |
299 | { | |
300 | assert(e.mem_obj); | |
301 | return abandonedAt(e.mem_obj->xitTable.index); | |
302 | } | |
303 | ||
304 | /// whether an in-transit entry at the index is now abandoned by its writer | |
305 | bool | |
306 | Transients::abandonedAt(const sfileno index) const | |
307 | { | |
308 | assert(map); | |
309 | return map->readableEntry(index).waitingToBeFreed; | |
310 | } | |
311 | ||
/// Stops sharing updates for the entry and releases its write lock.
/// A no-op for entries that were never registered with Transients.
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1;
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}
325 | ||
d366a7fa AR |
326 | int |
327 | Transients::readers(const StoreEntry &e) const | |
328 | { | |
329 | if (e.mem_obj && e.mem_obj->xitTable.index >= 0) { | |
330 | assert(map); | |
331 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
332 | } | |
333 | return 0; | |
334 | } | |
335 | ||
1bfe9ade AR |
336 | void |
337 | Transients::markForUnlink(StoreEntry &e) | |
2745fea5 AR |
338 | { |
339 | unlink(e); | |
340 | } | |
341 | ||
342 | void | |
343 | Transients::unlink(StoreEntry &e) | |
1bfe9ade AR |
344 | { |
345 | if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting) | |
346 | abandon(e); | |
347 | } | |
348 | ||
/// Forgets about the transient entry tied to mem_obj: releases the shared
/// map lock (write or read, depending on our role) and clears the locals
/// slot. Safe to call for entries that were never registered (negative index).
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map);
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL;
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}
365 | ||
9a9954ba AR |
366 | /// calculates maximum number of entries we need to store and map |
367 | int64_t | |
368 | Transients::EntryLimit() | |
369 | { | |
370 | // TODO: we should also check whether any SMP-aware caching is configured | |
371 | if (!UsingSmp() || !Config.onoff.collapsed_forwarding) | |
372 | return 0; // no SMP collapsed forwarding possible or needed | |
373 | ||
8f7dbf74 | 374 | return Config.collapsed_forwarding_shared_entries_limit; |
9a9954ba AR |
375 | } |
376 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner; ///< owner of the shared transients map segment
    Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner; ///< owner of the map extras segment
};

RunnerRegistrationEntry(TransientsRr);
9a9954ba | 395 | |
e4d13993 | 396 | void |
21b7990f | 397 | TransientsRr::useConfig() |
9a9954ba AR |
398 | { |
399 | assert(Config.memShared.configured()); | |
21b7990f | 400 | Ipc::Mem::RegisteredRunner::useConfig(); |
9a9954ba AR |
401 | } |
402 | ||
e4d13993 | 403 | void |
21b7990f | 404 | TransientsRr::create() |
9a9954ba | 405 | { |
9a9954ba AR |
406 | if (!Config.onoff.collapsed_forwarding) |
407 | return; | |
408 | ||
409 | const int64_t entryLimit = Transients::EntryLimit(); | |
9a9954ba AR |
410 | if (entryLimit <= 0) |
411 | return; // no SMP configured or a misconfiguration | |
412 | ||
413 | Must(!mapOwner); | |
414 | mapOwner = TransientsMap::Init(MapLabel, entryLimit); | |
1860fbac AR |
415 | Must(!extrasOwner); |
416 | extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit); | |
9a9954ba AR |
417 | } |
418 | ||
TransientsRr::~TransientsRr()
{
    // destroying the owners releases the shared memory segments
    delete extrasOwner;
    delete mapOwner;
}
f53969cc | 424 |