]>
Commit | Line | Data |
---|---|---|
9a9954ba AR |
1 | /* |
2 | * DEBUG: section 20 Storage Manager | |
3 | * | |
4 | */ | |
5 | ||
6 | #include "squid.h" | |
7 | #include "base/RunnersRegistry.h" | |
e4d13993 | 8 | #include "CollapsedForwarding.h" |
9a9954ba AR |
9 | #include "HttpReply.h" |
10 | #include "ipc/mem/Page.h" | |
11 | #include "ipc/mem/Pages.h" | |
12 | #include "MemObject.h" | |
9a9954ba AR |
13 | #include "mime_header.h" |
14 | #include "SquidConfig.h" | |
15 | #include "SquidMath.h" | |
16 | #include "StoreStats.h" | |
17 | #include "tools.h" | |
e4d13993 | 18 | #include "Transients.h" |
9a9954ba | 19 | |
9a9954ba | 20 | #include <limits> |
9a9954ba | 21 | |
9a9954ba AR |
/// shared memory segment path to use for Transients maps
/// (workers and the TransientsRr runner must agree on this label)
static const char *MapLabel = "transients_map";
/// constructs a disabled Transients store; init() enables it when
/// SMP collapsed forwarding is configured
Transients::Transients(): map(NULL), locals(NULL)
{
}
28 | ||
Transients::~Transients()
{
    delete map;    // shared map view; the segment owner is TransientsRr
    delete locals; // non-owning StoreEntry pointers; entries live elsewhere
}
34 | ||
35 | void | |
36 | Transients::init() | |
37 | { | |
38 | const int64_t entryLimit = EntryLimit(); | |
39 | if (entryLimit <= 0) | |
40 | return; // no SMP support or a misconfiguration | |
41 | ||
42 | Must(!map); | |
43 | map = new TransientsMap(MapLabel); | |
44 | map->cleaner = this; | |
6919be24 | 45 | |
8bcca0f8 | 46 | locals = new Locals(entryLimit, 0); |
9a9954ba AR |
47 | } |
48 | ||
/// reports shared memory usage to the Store statistics collector;
/// compiled out unless TRANSIENT_STATS_SUPPORTED is defined
void
Transients::getStats(StoreInfoStats &stats) const
{
#if TRANSIENT_STATS_SUPPORTED
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
#endif
}
63 | ||
64 | void | |
65 | Transients::stat(StoreEntry &e) const | |
66 | { | |
67 | storeAppendPrintf(&e, "\n\nTransient Objects\n"); | |
68 | ||
69 | storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0); | |
70 | storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n", | |
71 | currentSize() / 1024.0, | |
72 | Math::doublePercent(currentSize(), maxSize())); | |
73 | ||
74 | if (map) { | |
75 | const int limit = map->entryLimit(); | |
76 | storeAppendPrintf(&e, "Maximum entries: %9d\n", limit); | |
77 | if (limit > 0) { | |
78 | storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n", | |
79 | currentCount(), (100.0 * currentCount() / limit)); | |
80 | } | |
81 | } | |
82 | } | |
83 | ||
/// Store API: periodic maintenance hook; Transients entries are freed
/// explicitly via disconnect()/completeWriting(), so nothing to do here
void
Transients::maintain()
{
    // no lazy garbage collection needed
}
89 | ||
/// Store API: minimum store size; meaningless for transient entries
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
95 | ||
/// Store API: maximum cumulative size of stored objects
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
102 | ||
/// Store API: current cumulative size of stored objects
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
110 | ||
111 | uint64_t | |
112 | Transients::currentCount() const | |
113 | { | |
114 | return map ? map->entryCount() : 0; | |
115 | } | |
116 | ||
117 | int64_t | |
118 | Transients::maxObjectSize() const | |
119 | { | |
120 | // Squid currently does not limit the size of a transient object | |
121 | return std::numeric_limits<uint64_t>::max(); | |
122 | } | |
123 | ||
/// Store API: entry-access notification; we keep no replacement policy
void
Transients::reference(StoreEntry &)
{
    // no replacement policy (but the cache(s) storing the entry may have one)
}
129 | ||
/// Store API: asks whether the entry should stay in the global store_table;
/// we always answer "no" because we track entries in our own map
bool
Transients::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
136 | ||
/// Store API: I/O completion callback hook; Transients performs no async I/O
int
Transients::callback()
{
    return 0; // no events processed
}
142 | ||
/// Store API: iteration over stored entries; not supported for Transients
StoreSearch *
Transients::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL; // unreachable; fatal() does not return
}
149 | ||
/// Store API: returns a locally-usable StoreEntry for the given key, creating
/// one from shared map info if needed; returns NULL (and releases the read
/// lock) when the entry is private or cannot be loaded
StoreEntry *
Transients::get(const cache_key *key)
{
    if (!map)
        return NULL; // not initialized; we track nothing

    sfileno index;
    // on success, we hold a read lock on the anchor until further notice
    const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index);
    if (!anchor)
        return NULL;

    // If we already have a local entry, the store_table should have found it.
    // Since it did not, the local entry key must have changed from public to
    // private. We still need to keep the private entry around for syncing as
    // its clients depend on it, but we should not allow new clients to join.
    if (StoreEntry *oldE = locals->at(index)) {
        debugs(20, 3, "not joining private " << *oldE);
        assert(EBIT_TEST(oldE->flags, KEY_PRIVATE));
    } else if (StoreEntry *newE = copyFromShm(index)) {
        return newE; // keep read lock to receive updates from others
    }

    // private entry or loading failure
    map->closeForReading(index);
    return NULL;
}
176 | ||
/// creates a local StoreEntry from the shared map extras stored at index
/// and registers it in locals[]; the caller retains the map read lock
StoreEntry *
Transients::copyFromShm(const sfileno index)
{
    const TransientsMap::Extras &extras = map->extras(index);

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = storeCreatePureEntry(extras.url, extras.url,
                                         extras.reqFlags, extras.reqMethod);

    assert(e->mem_obj);
    e->mem_obj->method = extras.reqMethod;
    e->mem_obj->xitTable.io = MemObject::ioReading; // we joined as a reader
    e->mem_obj->xitTable.index = index;

    e->setPublicKey();
    assert(e->key);

    // How do we know its SMP- and not just locally-collapsed? A worker gets
    // locally-collapsed entries from the local store_table, not Transients.
    // TODO: Can we remove smpCollapsed by not syncing non-transient entries?
    e->mem_obj->smpCollapsed = true;

    assert(!locals->at(index));
    // We do not lock e because we do not want to prevent its destruction;
    // e is tied to us via mem_obj so we will know when it is destructed.
    locals->at(index) = e;
    return e;
}
205 | ||
206 | void | |
207 | Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData) | |
208 | { | |
209 | // XXX: not needed but Store parent forces us to implement this | |
210 | fatal("Transients::get(key,callback,data) should not be called"); | |
211 | } | |
212 | ||
6919be24 AR |
213 | StoreEntry * |
214 | Transients::findCollapsed(const sfileno index) | |
215 | { | |
216 | if (!map) | |
217 | return NULL; | |
218 | ||
219 | if (StoreEntry *oldE = locals->at(index)) { | |
220 | debugs(20, 5, "found " << *oldE << " at " << index << " in " << MapLabel); | |
221 | assert(oldE->mem_obj && oldE->mem_obj->xitTable.index == index); | |
222 | return oldE; | |
223 | } | |
224 | ||
225 | debugs(20, 3, "no entry at " << index << " in " << MapLabel); | |
226 | return NULL; | |
227 | } | |
228 | ||
/// registers the entry in the shared map so that other workers can collapse
/// onto it; on success, e becomes the writer and keeps the map write lock
void
Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags,
                         const HttpRequestMethod &reqMethod)
{
    assert(e);
    assert(e->mem_obj);
    assert(e->mem_obj->xitTable.index < 0); // not already registered

    if (!map) {
        debugs(20, 5, "No map to add " << *e);
        return;
    }

    sfileno index = 0;
    // on success, we hold a write lock on the anchor until further notice
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index);
    if (!slot) {
        debugs(20, 5, "collision registering " << *e);
        return;
    }

    try {
        if (copyToShm(*e, index, reqFlags, reqMethod)) {
            slot->set(*e);
            e->mem_obj->xitTable.io = MemObject::ioWriting;
            e->mem_obj->xitTable.index = index;
            map->startAppending(index); // readers may now join
            // keep write lock -- we will be supplying others with updates
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "error keeping entry " << index <<
               ' ' << *e << ": " << x.what());
        // fall through to the error handling code
    }

    // undo the registration so readers do not wait for us forever
    map->abortWriting(index);
}
267 | ||
9a9954ba AR |
268 | /// copies all relevant local data to shared memory |
269 | bool | |
270 | Transients::copyToShm(const StoreEntry &e, const sfileno index, | |
271 | const RequestFlags &reqFlags, | |
272 | const HttpRequestMethod &reqMethod) | |
273 | { | |
274 | TransientsMap::Extras &extras = map->extras(index); | |
275 | ||
276 | const char *url = e.url(); | |
277 | const size_t urlLen = strlen(url); | |
278 | Must(urlLen < sizeof(extras.url)); // we have space to store it all, plus 0 | |
279 | strncpy(extras.url, url, sizeof(extras.url)); | |
9d4e9cfb | 280 | extras.url[urlLen] = '\0'; |
9a9954ba AR |
281 | |
282 | extras.reqFlags = reqFlags; | |
9a9954ba AR |
283 | |
284 | Must(reqMethod != Http::METHOD_OTHER); | |
285 | extras.reqMethod = reqMethod.id(); | |
286 | ||
287 | return true; | |
288 | } | |
289 | ||
/// Ipc::StoreMapCleaner API: called when the map frees a slice we own
void
Transients::noteFreeMapSlice(const sfileno sliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
295 | ||
/// marks the in-transit entry as unusable so that current readers stop
/// waiting for updates and future readers do not join
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e); // wake waiting readers so they notice
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
306 | ||
/// whether the entry's writer has given up on (i.e., abandoned) the entry
bool
Transients::abandoned(const StoreEntry &e) const
{
    assert(e.mem_obj);
    return abandonedAt(e.mem_obj->xitTable.index);
}
313 | ||
/// whether an in-transit entry at the index is now abandoned by its writer
bool
Transients::abandonedAt(const sfileno index) const
{
    assert(map);
    // freeEntry() sets waitingToBeFreed on the (still locked) anchor
    return map->readableEntry(index).waitingToBeFreed;
}
321 | ||
/// stops the writer role for e: releases the map write lock and detaches the
/// entry from its transients slot; a no-op for entries we are not writing
void
Transients::completeWriting(const StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.index >= 0) {
        assert(e.mem_obj->xitTable.io == MemObject::ioWriting);
        // there will be no more updates from us after this, so we must prevent
        // future readers from joining
        map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
        map->closeForWriting(e.mem_obj->xitTable.index);
        e.mem_obj->xitTable.index = -1;
        e.mem_obj->xitTable.io = MemObject::ioDone;
    }
}
335 | ||
d366a7fa AR |
336 | int |
337 | Transients::readers(const StoreEntry &e) const | |
338 | { | |
339 | if (e.mem_obj && e.mem_obj->xitTable.index >= 0) { | |
340 | assert(map); | |
341 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
342 | } | |
343 | return 0; | |
344 | } | |
345 | ||
/// Store API: requests entry removal; only our own in-progress writes can be
/// marked (readers learn about removal from the writer)
void
Transients::markForUnlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
        abandon(e);
}
352 | ||
/// severs the link between a local MemObject and its transients slot,
/// releasing the corresponding map lock (write or read) and local pointer
void
Transients::disconnect(MemObject &mem_obj)
{
    if (mem_obj.xitTable.index >= 0) {
        assert(map);
        if (mem_obj.xitTable.io == MemObject::ioWriting) {
            // the writer quit without completing; readers must not wait for us
            map->abortWriting(mem_obj.xitTable.index);
        } else {
            assert(mem_obj.xitTable.io == MemObject::ioReading);
            map->closeForReading(mem_obj.xitTable.index);
        }
        locals->at(mem_obj.xitTable.index) = NULL;
        mem_obj.xitTable.index = -1;
        mem_obj.xitTable.io = MemObject::ioDone;
    }
}
369 | ||
9a9954ba AR |
370 | /// calculates maximum number of entries we need to store and map |
371 | int64_t | |
372 | Transients::EntryLimit() | |
373 | { | |
374 | // TODO: we should also check whether any SMP-aware caching is configured | |
375 | if (!UsingSmp() || !Config.onoff.collapsed_forwarding) | |
376 | return 0; // no SMP collapsed forwarding possible or needed | |
377 | ||
e6d2c263 | 378 | return 16*1024; // TODO: make configurable? |
9a9954ba AR |
379 | } |
380 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~TransientsRr();

protected:
    /// creates the shared map segment (master/coordinator process only)
    virtual void create(const RunnerRegistry &);

private:
    TransientsMap::Owner *mapOwner; ///< owns the shared map segment lifetime
};

RunnerRegistrationEntry(rrAfterConfig, TransientsRr);
398 | ||
void
TransientsRr::run(const RunnerRegistry &r)
{
    // shared memory must already be configured before we can create segments
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}
405 | ||
e4d13993 AR |
406 | void |
407 | TransientsRr::create(const RunnerRegistry &) | |
9a9954ba | 408 | { |
9a9954ba AR |
409 | if (!Config.onoff.collapsed_forwarding) |
410 | return; | |
411 | ||
412 | const int64_t entryLimit = Transients::EntryLimit(); | |
9a9954ba AR |
413 | if (entryLimit <= 0) |
414 | return; // no SMP configured or a misconfiguration | |
415 | ||
416 | Must(!mapOwner); | |
417 | mapOwner = TransientsMap::Init(MapLabel, entryLimit); | |
418 | } | |
419 | ||
TransientsRr::~TransientsRr()
{
    delete mapOwner; // destroys the shared map segment we created (if any)
}