]>
Commit | Line | Data |
---|---|---|
9a9954ba | 1 | /* |
bbc27441 | 2 | * Copyright (C) 1996-2014 The Squid Software Foundation and contributors |
9a9954ba | 3 | * |
bbc27441 AJ |
4 | * Squid software is distributed under GPLv2+ license and includes |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
9a9954ba AR |
7 | */ |
8 | ||
bbc27441 AJ |
9 | /* DEBUG: section 20 Storage Manager */ |
10 | ||
9a9954ba AR |
11 | #include "squid.h" |
12 | #include "base/RunnersRegistry.h" | |
e4d13993 | 13 | #include "CollapsedForwarding.h" |
9a9954ba AR |
14 | #include "HttpReply.h" |
15 | #include "ipc/mem/Page.h" | |
16 | #include "ipc/mem/Pages.h" | |
17 | #include "MemObject.h" | |
9a9954ba AR |
18 | #include "mime_header.h" |
19 | #include "SquidConfig.h" | |
20 | #include "SquidMath.h" | |
21 | #include "StoreStats.h" | |
22 | #include "tools.h" | |
e4d13993 | 23 | #include "Transients.h" |
9a9954ba | 24 | |
9a9954ba | 25 | #include <limits> |
9a9954ba | 26 | |
/// shared memory segment path to use for Transients map
static const SBuf MapLabel("transients_map");
/// shared memory segment path to use for Transients map extras
static const char *ExtrasLabel = "transients_ex";
9a9954ba | 31 | |
6919be24 | 32 | Transients::Transients(): map(NULL), locals(NULL) |
9a9954ba | 33 | { |
9a9954ba AR |
34 | } |
35 | ||
36 | Transients::~Transients() | |
37 | { | |
38 | delete map; | |
6919be24 | 39 | delete locals; |
9a9954ba AR |
40 | } |
41 | ||
/// Allocates the shared map, attaches to the extras segment, and sizes the
/// local-entry registry. A no-op when SMP collapsed forwarding is disabled.
void
Transients::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP support or a misconfiguration

    Must(!map);
    map = new TransientsMap(MapLabel);
    map->cleaner = this; // receive noteFreeMapSlice() notifications

    // attach to the extras segment created earlier by TransientsRr::create()
    extras = shm_old(TransientsMapExtras)(ExtrasLabel);

    // one local StoreEntry pointer slot per possible shared map entry
    locals = new Locals(entryLimit, 0);
}
57 | ||
58 | void | |
59 | Transients::getStats(StoreInfoStats &stats) const | |
60 | { | |
61 | #if TRANSIENT_STATS_SUPPORTED | |
62 | const size_t pageSize = Ipc::Mem::PageSize(); | |
63 | ||
64 | stats.mem.shared = true; | |
65 | stats.mem.capacity = | |
66 | Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize; | |
67 | stats.mem.size = | |
68 | Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize; | |
69 | stats.mem.count = currentCount(); | |
70 | #endif | |
71 | } | |
72 | ||
/// Appends a human-readable usage report to the given cache-manager entry.
void
Transients::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nTransient Objects\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));
        }
    }
}
92 | ||
93 | void | |
94 | Transients::maintain() | |
95 | { | |
e4d13993 | 96 | // no lazy garbage collection needed |
9a9954ba AR |
97 | } |
98 | ||
99 | uint64_t | |
100 | Transients::minSize() const | |
101 | { | |
102 | return 0; // XXX: irrelevant, but Store parent forces us to implement this | |
103 | } | |
104 | ||
105 | uint64_t | |
106 | Transients::maxSize() const | |
107 | { | |
108 | // Squid currently does not limit the total size of all transient objects | |
109 | return std::numeric_limits<uint64_t>::max(); | |
110 | } | |
111 | ||
112 | uint64_t | |
113 | Transients::currentSize() const | |
114 | { | |
115 | // TODO: we do not get enough information to calculate this | |
116 | // StoreEntry should update associated stores when its size changes | |
117 | return 0; | |
118 | } | |
119 | ||
120 | uint64_t | |
121 | Transients::currentCount() const | |
122 | { | |
123 | return map ? map->entryCount() : 0; | |
124 | } | |
125 | ||
126 | int64_t | |
127 | Transients::maxObjectSize() const | |
128 | { | |
129 | // Squid currently does not limit the size of a transient object | |
130 | return std::numeric_limits<uint64_t>::max(); | |
131 | } | |
132 | ||
133 | void | |
134 | Transients::reference(StoreEntry &) | |
135 | { | |
e4d13993 | 136 | // no replacement policy (but the cache(s) storing the entry may have one) |
9a9954ba AR |
137 | } |
138 | ||
139 | bool | |
140 | Transients::dereference(StoreEntry &, bool) | |
141 | { | |
142 | // no need to keep e in the global store_table for us; we have our own map | |
143 | return false; | |
144 | } | |
145 | ||
146 | int | |
147 | Transients::callback() | |
148 | { | |
149 | return 0; | |
150 | } | |
151 | ||
152 | StoreSearch * | |
153 | Transients::search(String const, HttpRequest *) | |
154 | { | |
155 | fatal("not implemented"); | |
156 | return NULL; | |
157 | } | |
158 | ||
/// Store API: returns a local StoreEntry synced with the shared transients
/// entry for the given public key, or NULL. On success, keeps the shared map
/// slot read-locked so that the caller keeps receiving writer updates.
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL; // SMP collapsed forwarding is not enabled

    sfileno index;
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL; // no readable shared entry for this key

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index); // release the read lock taken above
    return NULL;
}
185 | ||
/// Builds a brand new local StoreEntry from the shared-memory extras stored
/// at the given map index and registers it in the locals registry.
/// The caller is expected to hold (and keep) the read lock for that index.
StoreEntry *
Transients::copyFromShm(const sfileno index)
{
    const TransientsMapExtras::Item &extra = extras->items[index];

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = storeCreatePureEntry(extra.url, extra.url,
                                         extra.reqFlags, extra.reqMethod);

    assert(e->mem_obj);
    e->mem_obj->method = extra.reqMethod;
    e->mem_obj->xitTable.io = MemObject::ioReading; // we joined as a reader
    e->mem_obj->xitTable.index = index;

    e->setPublicKey();
    assert(e->key);

    // How do we know its SMP- and not just locally-collapsed? A worker gets
    // locally-collapsed entries from the local store_table, not Transients.
    // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
    e->mem_obj->smpCollapsed = true;

    assert(!locals->at(index));
    // We do not lock e because we do not want to prevent its destruction;
    // e is tied to us via mem_obj so we will know when it is destructed.
    locals->at(index) = e;
    return e;
}
214 | ||
215 | void | |
216 | Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData) | |
217 | { | |
218 | // XXX: not needed but Store parent forces us to implement this | |
219 | fatal("Transients::get(key,callback,data) should not be called"); | |
220 | } | |
221 | ||
/// Returns the previously registered local entry for the given shared map
/// index (if any). Used to locate the local twin of an SMP-collapsed entry.
StoreEntry *
Transients::findCollapsed(const sfileno index)
{
    if (!map)
        return NULL; // SMP collapsed forwarding is not enabled

    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel);
        // sanity check: the local entry must still be tied to this slot
        assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index);
        return oldE;
    }

    debugs(20, 3, "no entry at " << index << " in " << MapLabel);
    return NULL;
}
237 | ||
/// Registers entry e as being written by this worker: opens a shared map
/// slot for writing, copies request metadata to shared memory, and keeps the
/// write lock so that we can supply other workers with updates.
/// On any failure, the slot is released and e is left unregistered.
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0); // must not be registered already

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return; // SMP collapsed forwarding is not enabled
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return; // another worker is already writing this key
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index);
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortWriting(index); // release the half-initialized slot
}
276 | ||
/// copies all relevant local data to shared memory
/// \returns true on success (currently the only outcome; failures throw)
bool
Transients::copyToShm(const StoreEntry &e, const sfileno index,
                      const RequestFlags &reqFlags,
                      const HttpRequestMethod &reqMethod)
{
    TransientsMapExtras::Item &extra = extras->items[index];

    const char *url = e.url();
    const size_t urlLen = strlen(url);
    Must(urlLen < sizeof(extra.url)); // we have space to store it all, plus 0
    strncpy(extra.url, url, sizeof(extra.url));
    extra.url[urlLen] = '\0'; // strncpy() does not guarantee termination

    extra.reqFlags = reqFlags;

    Must(reqMethod != Http::METHOD_OTHER); // only well-known methods fit here
    extra.reqMethod = reqMethod.id();

    return true;
}
298 | ||
299 | void | |
36c84e19 | 300 | Transients::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId) |
9a9954ba AR |
301 | { |
302 | // TODO: we should probably find the entry being deleted and abort it | |
303 | } | |
304 | ||
/// Marks the shared entry as abandoned by its writer and broadcasts the
/// change to other workers so that readers learn about the failure.
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e); // wake readers to notice the mark
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
315 | ||
316 | bool | |
317 | Transients::abandoned(const StoreEntry &e) const | |
318 | { | |
319 | assert(e.mem_obj); | |
320 | return abandonedAt(e.mem_obj->xitTable.index); | |
321 | } | |
322 | ||
/// whether an in-transit entry at the index is now abandoned by its writer
bool
Transients::abandonedAt(const sfileno index) const
{
    assert(map);
    // abandon() sets waitingToBeFreed via map->freeEntry()
    return map->readableEntry(index).waitingToBeFreed;
}
330 | ||
/// Ends this worker's writer role for e: blocks future readers from joining,
/// releases the write lock, and detaches e from the transients table.
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1; // no longer registered with us
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}
344 | ||
d366a7fa AR |
345 | int |
346 | Transients::readers(const StoreEntry &e) const | |
347 | { | |
348 | if (e.mem_obj && e.mem_obj->xitTable.index >= 0) { | |
349 | assert(map); | |
350 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
351 | } | |
352 | return 0; | |
353 | } | |
354 | ||
1bfe9ade AR |
355 | void |
356 | Transients::markForUnlink(StoreEntry &e) | |
357 | { | |
358 | if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting) | |
359 | abandon(e); | |
360 | } | |
361 | ||
/// Detaches the given MemObject from Transients: releases its shared map
/// lock (write or read), clears its locals slot, and resets its xitTable
/// state. Safe to call for objects that were never registered.
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map); // a valid index implies init() created the map
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL; // forget the local twin
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}
378 | ||
/// calculates maximum number of entries we need to store and map
/// \returns zero when SMP collapsed forwarding is not possible or not needed
int64_t
Transients::EntryLimit()
{
    // TODO: we should also check whether any SMP-aware caching is configured
    if (!UsingSmp() || !Config.onoff.collapsed_forwarding)
        return 0; // no SMP collapsed forwarding possible or needed

    return 16*1024; // TODO: make configurable?
}
389 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL), extrasOwner(NULL) {}
    virtual void useConfig();
    virtual ~TransientsRr();

protected:
    virtual void create();

private:
    TransientsMap::Owner *mapOwner; ///< owner of the shared map segment
    Ipc::Mem::Owner<TransientsMapExtras> *extrasOwner; ///< owner of the extras segment
};
406 | ||
21b7990f | 407 | RunnerRegistrationEntry(TransientsRr); |
9a9954ba | 408 | |
/// RegisteredRunner API: reacts to the parsed configuration.
void
TransientsRr::useConfig()
{
    // shared memory must already be configured before segments are created
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
415 | ||
/// RegisteredRunner API: creates the shared map and extras segments
/// (in the master process) when collapsed forwarding is enabled.
void
TransientsRr::create()
{
    if (!Config.onoff.collapsed_forwarding)
        return; // the feature is disabled; nothing to create

    const int64_t entryLimit = Transients::EntryLimit();
    if (entryLimit <= 0)
        return; // no SMP configured or a misconfiguration

    Must(!mapOwner);
    mapOwner = TransientsMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(TransientsMapExtras)(ExtrasLabel, entryLimit);
}
431 | ||
/// Destroys the segment owners, removing the shared memory segments.
TransientsRr::~TransientsRr()
{
    delete extrasOwner;
    delete mapOwner;
}