/*
 * Copyright (C) 1996-2014 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */

#include "squid.h"
#include "comm/Connection.h"
#include "Generic.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "StoreClient.h"

#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// computes a quick URL integrity checksum for debugging: the first
/// sizeof(unsigned int) bytes of the URL's MD5 digest
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif

RemovalPolicy * mem_policy = NULL;

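/// the number of live MemObject instances, as tracked by the class allocator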
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]";
    }
    return storeId_.termedBuf();
}

const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}

bool
MemObject::hasUris() const
{
    return storeId_.size();
}

void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    storeId_ = aStoreId;

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}

MemObject::MemObject(): smpCollapsed(false)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply);

    object_sz = -1;

    /* XXX account log_url */

    swapout.decision = SwapOut::swNeedsCheck;
}

MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx); /* must exit before we free mem->url */

    safe_free(vary_headers);
}

void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}

void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert(data_hdr.endOffset() || writeBuffer.offset == 0);

    assert(data_hdr.write(writeBuffer));
    PROF_stop(MemObject_write);
}

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "." << std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}

HttpReply const *
MemObject::getReply() const
{
    return _reply;
}

void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}

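/// visitor for for_each(): tracks the lowest copy offset still wanted by any
/// store client that is waiting for in-memory data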
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed): current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

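/// visitor for for_each(): appends each store client's statistics to a MemBuf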
struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry): where(anEntry), index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where;
    size_t index;
};

void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->Printf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %" PRId64 " bytes queued\n",
               swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %" PRId64 " bytes written\n",
                   (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->Printf("\ttransient index: %d state: %d\n",
                   xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->Printf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n",
                   memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->Printf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->Printf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset() const
{
    return data_hdr.endOffset();
}

void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}

int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

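/// best estimate of the final reply size: exact once the object is complete,
/// otherwise Content-Length plus header size when both are known, else -1.
/// Illustrative example: a reply with 250 header bytes and Content-Length:
/// 1000 would be estimated at 1250 while the body is still arriving.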
int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, HERE << "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (_reply) {
        const int64_t clen = _reply->bodySize(method);
        debugs(20, 7, HERE << "clen: " << clen);
        if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
            return clen + _reply->hdr_sz;
    }

    return -1; // not enough information to predict
}

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}

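/// the lowest offset still wanted by any store client, or endOffset() + 1
/// when there are no clients (the seed value, meaning everything is consumed)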
int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest(endOffset() + 1);

    for_each<LowestMemReader>(clients, lowest);

    return lowest.current;
}

/* XXX: This is wrong. It breaks *badly* on range combining */
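// Illustrative example (not from the sources): with read_ahead_gap set to
// 16 KB, a producer that has buffered 64 KB of body may not read more from
// the server until the slowest client has consumed past 48 KB, so the
// producer never runs more than the configured gap ahead of its readers.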
bool
MemObject::readAheadPolicyCanRead() const
{
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum() const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}

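/// the lowest in-memory offset that the trimming policy says we must keep:
/// we keep everything (inmem_lo) unless readers are past the end, the object
/// exceeds maxInMemObjSize, or (for swappable objects) memory_cache_first is
/// off; in those cases only bytes not yet read by the slowest client remain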
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(true);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}

void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert(new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange(Range<int64_t>(inmem_lo, endOffset()));
    /* XXX: make this higher level */
    debugs(19, result ? 4 : 3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}

void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

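/// among all store clients, the DelayId currently willing to accept the most
/// bytes; used to pace server reads by the least-restricted client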
#if USE_DELAY_POOLS
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif

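/// the number of in-memory bytes that have not yet been queued for swap out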
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}