]>
Commit | Line | Data |
---|---|---|
9a9954ba AR |
1 | /* |
2 | * DEBUG: section 20 Storage Manager | |
3 | * | |
4 | */ | |
5 | ||
6 | #include "squid.h" | |
1bfe9ade | 7 | #include "CollapsedForwarding.h" /* XXX: who should broadcast and when? */ |
9a9954ba AR |
8 | #include "base/RunnersRegistry.h" |
9 | #include "HttpReply.h" | |
10 | #include "ipc/mem/Page.h" | |
11 | #include "ipc/mem/Pages.h" | |
12 | #include "MemObject.h" | |
13 | #include "Transients.h" | |
14 | #include "mime_header.h" | |
15 | #include "SquidConfig.h" | |
16 | #include "SquidMath.h" | |
17 | #include "StoreStats.h" | |
18 | #include "tools.h" | |
19 | ||
20 | #if HAVE_LIMITS_H | |
21 | #include <limits> | |
22 | #endif | |
23 | ||
24 | ||
25 | /// shared memory segment path to use for Transients maps | |
26 | static const char *MapLabel = "transients_map"; | |
27 | ||
28 | ||
/// A Transients instance is inert until init() allocates the shared map.
Transients::Transients(): map(NULL)
{
}
32 | ||
Transients::~Transients()
{
    delete map; // may be NULL if init() never ran or found no SMP need
}
37 | ||
38 | void | |
39 | Transients::init() | |
40 | { | |
41 | const int64_t entryLimit = EntryLimit(); | |
42 | if (entryLimit <= 0) | |
43 | return; // no SMP support or a misconfiguration | |
44 | ||
45 | Must(!map); | |
46 | map = new TransientsMap(MapLabel); | |
47 | map->cleaner = this; | |
48 | } | |
49 | ||
50 | void | |
51 | Transients::getStats(StoreInfoStats &stats) const | |
52 | { | |
53 | #if TRANSIENT_STATS_SUPPORTED | |
54 | const size_t pageSize = Ipc::Mem::PageSize(); | |
55 | ||
56 | stats.mem.shared = true; | |
57 | stats.mem.capacity = | |
58 | Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize; | |
59 | stats.mem.size = | |
60 | Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize; | |
61 | stats.mem.count = currentCount(); | |
62 | #endif | |
63 | } | |
64 | ||
65 | void | |
66 | Transients::stat(StoreEntry &e) const | |
67 | { | |
68 | storeAppendPrintf(&e, "\n\nTransient Objects\n"); | |
69 | ||
70 | storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0); | |
71 | storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n", | |
72 | currentSize() / 1024.0, | |
73 | Math::doublePercent(currentSize(), maxSize())); | |
74 | ||
75 | if (map) { | |
76 | const int limit = map->entryLimit(); | |
77 | storeAppendPrintf(&e, "Maximum entries: %9d\n", limit); | |
78 | if (limit > 0) { | |
79 | storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n", | |
80 | currentCount(), (100.0 * currentCount() / limit)); | |
81 | } | |
82 | } | |
83 | } | |
84 | ||
void
Transients::maintain()
{
    // no periodic maintenance needed: entries are created and freed on demand
}
89 | ||
uint64_t
Transients::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
95 | ||
uint64_t
Transients::maxSize() const
{
    // Squid currently does not limit the total size of all transient objects
    return std::numeric_limits<uint64_t>::max();
}
102 | ||
uint64_t
Transients::currentSize() const
{
    // TODO: we do not get enough information to calculate this
    // StoreEntry should update associated stores when its size changes
    return 0;
}
110 | ||
111 | uint64_t | |
112 | Transients::currentCount() const | |
113 | { | |
114 | return map ? map->entryCount() : 0; | |
115 | } | |
116 | ||
117 | int64_t | |
118 | Transients::maxObjectSize() const | |
119 | { | |
120 | // Squid currently does not limit the size of a transient object | |
121 | return std::numeric_limits<uint64_t>::max(); | |
122 | } | |
123 | ||
void
Transients::reference(StoreEntry &)
{
    // nothing to do: we do not maintain LRU/reference metadata for transients
}
128 | ||
/// \returns false so the caller may remove the entry from the global table
bool
Transients::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
135 | ||
int
Transients::callback()
{
    return 0; // no asynchronous events to deliver
}
141 | ||
/// not supported for transient entries; terminates Squid if called
StoreSearch *
Transients::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL; // unreachable; fatal() does not return
}
148 | ||
149 | StoreEntry * | |
150 | Transients::get(const cache_key *key) | |
151 | { | |
152 | if (!map) | |
153 | return NULL; | |
154 | ||
155 | sfileno index; | |
1bfe9ade AR |
156 | const Ipc::StoreMapAnchor *anchor = map->openForReading(key, index); |
157 | if (!anchor) | |
9a9954ba AR |
158 | return NULL; |
159 | ||
1bfe9ade AR |
160 | // Without a writer, either the response has been cached already or we will |
161 | // get stuck waiting for it to be cached (because nobody will cache it). | |
162 | if (!anchor->writing()) { | |
163 | debugs(20, 5, "ignoring writer-less entry " << index); | |
164 | } else if (StoreEntry *e = copyFromShm(index)) { | |
4475555f | 165 | return e; // keep read lock to receive updates from others |
1bfe9ade | 166 | } |
4475555f | 167 | |
1bfe9ade | 168 | // missing writer or loading failure |
4475555f AR |
169 | map->closeForReading(index); |
170 | return NULL; | |
171 | } | |
172 | ||
173 | StoreEntry * | |
174 | Transients::copyFromShm(const sfileno index) | |
175 | { | |
9a9954ba AR |
176 | const TransientsMap::Extras &extras = map->extras(index); |
177 | ||
178 | // create a brand new store entry and initialize it with stored info | |
1bfe9ade | 179 | StoreEntry *e = storeCreatePureEntry(extras.url, extras.url, |
9a9954ba | 180 | extras.reqFlags, extras.reqMethod); |
9a9954ba AR |
181 | |
182 | assert(e->mem_obj); | |
183 | e->mem_obj->method = extras.reqMethod; | |
99921d9d | 184 | e->mem_obj->xitTable.io = MemObject::ioReading; |
4475555f | 185 | e->mem_obj->xitTable.index = index; |
9a9954ba | 186 | |
9a9954ba | 187 | e->setPublicKey(); |
ce49546e | 188 | assert(e->key); |
9a9954ba | 189 | |
4475555f AR |
190 | // How do we know its SMP- and not just locally-collapsed? A worker gets |
191 | // locally-collapsed entries from the local store_table, not Transients. | |
192 | // TODO: Can we remove smpCollapsed by not syncing non-transient entries? | |
193 | e->mem_obj->smpCollapsed = true; | |
194 | ||
9a9954ba AR |
195 | return e; |
196 | } | |
197 | ||
198 | void | |
199 | Transients::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData) | |
200 | { | |
201 | // XXX: not needed but Store parent forces us to implement this | |
202 | fatal("Transients::get(key,callback,data) should not be called"); | |
203 | } | |
204 | ||
205 | void | |
99921d9d | 206 | Transients::startWriting(StoreEntry *e, const RequestFlags &reqFlags, |
9a9954ba AR |
207 | const HttpRequestMethod &reqMethod) |
208 | { | |
209 | assert(e); | |
4475555f AR |
210 | assert(e->mem_obj); |
211 | assert(e->mem_obj->xitTable.index < 0); | |
9a9954ba AR |
212 | |
213 | if (!map) { | |
214 | debugs(20, 5, "No map to add " << *e); | |
215 | return; | |
216 | } | |
217 | ||
218 | sfileno index = 0; | |
219 | Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e->key), index); | |
220 | if (!slot) { | |
4475555f | 221 | debugs(20, 5, "collision registering " << *e); |
9a9954ba AR |
222 | return; |
223 | } | |
224 | ||
225 | try { | |
226 | if (copyToShm(*e, index, reqFlags, reqMethod)) { | |
227 | slot->set(*e); | |
99921d9d | 228 | e->mem_obj->xitTable.io = MemObject::ioWriting; |
4475555f AR |
229 | e->mem_obj->xitTable.index = index; |
230 | map->startAppending(index); | |
231 | // keep write lock -- we will be supplying others with updates | |
9a9954ba AR |
232 | return; |
233 | } | |
234 | // fall through to the error handling code | |
235 | } | |
236 | catch (const std::exception &x) { // TODO: should we catch ... as well? | |
237 | debugs(20, 2, "error keeping entry " << index << | |
238 | ' ' << *e << ": " << x.what()); | |
239 | // fall through to the error handling code | |
240 | } | |
241 | ||
4475555f | 242 | map->abortWriting(index); |
9a9954ba AR |
243 | } |
244 | ||
245 | ||
246 | /// copies all relevant local data to shared memory | |
247 | bool | |
248 | Transients::copyToShm(const StoreEntry &e, const sfileno index, | |
249 | const RequestFlags &reqFlags, | |
250 | const HttpRequestMethod &reqMethod) | |
251 | { | |
252 | TransientsMap::Extras &extras = map->extras(index); | |
253 | ||
254 | const char *url = e.url(); | |
255 | const size_t urlLen = strlen(url); | |
256 | Must(urlLen < sizeof(extras.url)); // we have space to store it all, plus 0 | |
257 | strncpy(extras.url, url, sizeof(extras.url)); | |
258 | extras.url[urlLen] = '\0'; | |
259 | ||
260 | extras.reqFlags = reqFlags; | |
9a9954ba AR |
261 | |
262 | Must(reqMethod != Http::METHOD_OTHER); | |
263 | extras.reqMethod = reqMethod.id(); | |
264 | ||
265 | return true; | |
266 | } | |
267 | ||
/// Ipc::StoreMapCleaner API: called when the map frees a slice
void
Transients::noteFreeMapSlice(const sfileno sliceId)
{
    // TODO: we should probably find the entry being deleted and abort it
}
273 | ||
4475555f AR |
/// marks the entry as abandoned by its writer and tells other workers
void
Transients::abandon(const StoreEntry &e)
{
    assert(e.mem_obj && map);
    map->freeEntry(e.mem_obj->xitTable.index); // just marks the locked entry
    CollapsedForwarding::Broadcast(e); // wake waiting readers so they notice
    // We do not unlock the entry now because the problem is most likely with
    // the server resource rather than a specific cache writer, so we want to
    // prevent other readers from collapsing requests for that resource.
}
284 | ||
/// whether the entry's writer has given up on caching it
bool
Transients::abandoned(const StoreEntry &e) const
{
    assert(e.mem_obj);
    return abandonedAt(e.mem_obj->xitTable.index);
}
291 | ||
/// whether an in-transit entry at the index is now abandoned by its writer
bool
Transients::abandonedAt(const sfileno index) const
{
    assert(map);
    // freeEntry() in abandon() sets waitingToBeFreed on the locked anchor
    return map->readableEntry(index).waitingToBeFreed;
}
299 | ||
99921d9d AR |
300 | void |
301 | Transients::completeWriting(const StoreEntry &e) | |
302 | { | |
303 | if (e.mem_obj && e.mem_obj->xitTable.index >= 0) { | |
304 | assert(e.mem_obj->xitTable.io == MemObject::ioWriting); | |
305 | map->closeForWriting(e.mem_obj->xitTable.index); | |
306 | e.mem_obj->xitTable.index = -1; | |
307 | e.mem_obj->xitTable.io = MemObject::ioDone; | |
308 | } | |
309 | } | |
310 | ||
d366a7fa AR |
311 | int |
312 | Transients::readers(const StoreEntry &e) const | |
313 | { | |
314 | if (e.mem_obj && e.mem_obj->xitTable.index >= 0) { | |
315 | assert(map); | |
316 | return map->peekAtEntry(e.mem_obj->xitTable.index).lock.readers; | |
317 | } | |
318 | return 0; | |
319 | } | |
320 | ||
1bfe9ade AR |
/// if we are writing the entry, abandon it (readers cannot unlink)
void
Transients::markForUnlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->xitTable.io == MemObject::ioWriting)
        abandon(e);
}
327 | ||
4475555f AR |
328 | void |
329 | Transients::disconnect(MemObject &mem_obj) | |
330 | { | |
99921d9d AR |
331 | if (mem_obj.xitTable.index >= 0) { |
332 | assert(map); | |
333 | if (mem_obj.xitTable.io == MemObject::ioWriting) { | |
334 | map->abortWriting(mem_obj.xitTable.index); | |
335 | } else { | |
336 | assert(mem_obj.xitTable.io == MemObject::ioReading); | |
337 | map->closeForReading(mem_obj.xitTable.index); | |
338 | } | |
339 | mem_obj.xitTable.index = -1; | |
340 | mem_obj.xitTable.io = MemObject::ioDone; | |
341 | } | |
4475555f AR |
342 | } |
343 | ||
9a9954ba AR |
344 | /// calculates maximum number of entries we need to store and map |
345 | int64_t | |
346 | Transients::EntryLimit() | |
347 | { | |
348 | // TODO: we should also check whether any SMP-aware caching is configured | |
349 | if (!UsingSmp() || !Config.onoff.collapsed_forwarding) | |
350 | return 0; // no SMP collapsed forwarding possible or needed | |
351 | ||
352 | return 16*1024; // XXX: make configurable | |
353 | } | |
354 | ||
/// initializes shared memory segment used by Transients
class TransientsRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    TransientsRr(): mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~TransientsRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    TransientsMap::Owner *mapOwner; ///< owns the shared segment; freed in dtor
};
370 | ||
371 | RunnerRegistrationEntry(rrAfterConfig, TransientsRr); | |
372 | ||
void TransientsRr::run(const RunnerRegistry &r)
{
    // the registry promises to run us only after shared memory is configured
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r); // dispatches to create() when needed
}
378 | ||
379 | void TransientsRr::create(const RunnerRegistry &) | |
380 | { | |
9a9954ba AR |
381 | if (!Config.onoff.collapsed_forwarding) |
382 | return; | |
383 | ||
384 | const int64_t entryLimit = Transients::EntryLimit(); | |
9a9954ba AR |
385 | if (entryLimit <= 0) |
386 | return; // no SMP configured or a misconfiguration | |
387 | ||
388 | Must(!mapOwner); | |
389 | mapOwner = TransientsMap::Init(MapLabel, entryLimit); | |
390 | } | |
391 | ||
TransientsRr::~TransientsRr()
{
    delete mapOwner; // releases the shared map segment ownership
}