]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
merge from trunk
[thirdparty/squid.git] / src / MemObject.cc
1
2 /*
3 * DEBUG: section 19 Store Memory Primitives
4 * AUTHOR: Robert Collins
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "comm/Connection.h"
36 #include "Generic.h"
37 #include "globals.h"
38 #include "HttpReply.h"
39 #include "HttpRequest.h"
40 #include "MemBuf.h"
41 #include "MemObject.h"
42 #include "profiler/Profiler.h"
43 #include "SquidConfig.h"
44 #include "Store.h"
45 #include "StoreClient.h"
46
47 #if USE_DELAY_POOLS
48 #include "DelayPools.h"
49 #endif
50
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
/// Debug-only helper: folds the URL's MD5 digest down to a 32-bit checksum
/// (the first sizeof(unsigned int) digest bytes). Stored at setUris() time
/// and re-verified by MemObject::checkUrlChecksum() to detect URL corruption.
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16]; // scratch buffer; single-threaded debug use only
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
68
/// global removal policy driving the in-memory object cache (set at startup)
RemovalPolicy * mem_policy = NULL;
70
/// \returns the number of MemObject instances currently allocated
/// from the class-wide memory pool (used for cache manager statistics)
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}
76
/// \returns the store ID (cache lookup key) as a terminated C string.
/// If the ID was never set — which is a bug — logs the problem, dumps the
/// object's state, and substitutes a placeholder so callers always get a
/// usable string rather than NULL.
const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]"; // storeId_ must be mutable for this fallback
    }
    return storeId_.termedBuf();
}
87
/// \returns the URI to record in access logs; falls back to storeId()
/// when no distinct log URI was stored (the common case — see setUris())
const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}
93
/// whether setUris() has been called, i.e. whether storeId()/logUri()
/// will return real values rather than the "[unknown_URI]" placeholder
bool
MemObject::hasUris() const
{
    return storeId_.size();
}
99
/// Records the object's identity: its store ID (cache key), the URI to use
/// in access logs, and the request method.
/// \param aStoreId the cache lookup key
/// \param aLogUri  logging URI; may be NULL or equal to aStoreId, in which
///                 case storeId_ is reused to avoid a copy
/// \param aMethod  the HTTP request method associated with this object
void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    storeId_ = aStoreId;

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX()); // remember checksum to catch later corruption
#endif
}
117
/// Constructs an empty in-memory object with a locked placeholder reply,
/// an unknown object size, and a swapout decision still to be made.
MemObject::MemObject(): smpCollapsed(false)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply); // hold a reference until destroyed or replaced

    object_sz = -1; // -1 means "size not yet known"; see size()/expectedReplySize()

    /* XXX account log_url */

    swapout.decision = SwapOut::swNeedsCheck;
}
130
/// Destroys the in-memory object: releases the reply and request references,
/// frees in-memory data pages and the Vary headers buffer. Outside of
/// shutdown, the object must already be disconnected from the transients and
/// memory-cache tables and have no pending swapout.
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(vary_headers);
}
165
/// Drops this object's reference to the originating HTTP request
/// (unlocks and clears the `request` pointer).
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
171
172 void
173 MemObject::write(const StoreIOBuffer &writeBuffer)
174 {
175 PROF_start(MemObject_write);
176 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
177
178 /* We don't separate out mime headers yet, so ensure that the first
179 * write is at offset 0 - where they start
180 */
181 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
182
183 assert (data_hdr.write (writeBuffer));
184 PROF_stop(MemObject_write);
185 }
186
/// Dumps this object's state (data pages, offsets, client count, URIs)
/// to the debug log at DBG_IMPORTANT level; used when reporting bugs
/// such as a missing store ID.
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    // print start_ping as seconds.microseconds with zero-padded usec
    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
205
/// \returns the current HTTP reply (never NULL; the constructor installs
/// a placeholder reply that replaceHttpReply() may later swap out)
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
211
/// Replaces the stored HTTP reply with a new one, transferring reference
/// ownership: the old reply is unlocked (and possibly freed) and the new
/// one is locked for the lifetime of this object.
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}
219
/// Functor for for_each over store clients: tracks the lowest in-memory
/// read offset among all clients, starting from a seed value.
/// Used by MemObject::lowestMemReaderOffset().
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current; ///< lowest client offset seen so far (or the seed)
};
230
/// Functor for for_each over store clients: appends each client's stats to
/// a MemBuf, numbering clients sequentially. Used by MemObject::stat().
struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where; ///< output buffer the stats are appended to
    size_t index;  ///< running client number
};
242
/// Appends a human-readable description of this object (URI, memory range,
/// swapout/transient/mem-cache state, and per-client stats) to the given
/// MemBuf; used by the cache manager report.
void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->Printf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %" PRId64 " bytes queued\n",
               swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %" PRId64 " bytes written\n",
                   (int64_t) swapout.sio->offset());

    // negative indexes mean "not in that table"
    if (xitTable.index >= 0)
        mb->Printf("\ttransient index: %d state: %d\n",
                   xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->Printf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n",
                   memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->Printf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->Printf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
273
/// \returns the offset just past the last byte currently held in memory
/// (the in-memory high-water mark, including reply headers)
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
279
/// Records that everything received so far is reply headers: sets the
/// reply's hdr_sz to the current in-memory end offset.
/// NOTE(review): endOffset() is int64_t but hdr_sz is stored as int —
/// presumably safe because headers are small; confirm no overflow risk.
void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}
288
289 int64_t
290 MemObject::size() const
291 {
292 if (object_sz < 0)
293 return endOffset();
294
295 return object_sz;
296 }
297
/// Predicts the total reply size (headers + body) when possible.
/// \returns object_sz when the object is complete; otherwise headers+body
///          computed from the reply, or -1 when there is not enough
///          information to predict the size
int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, HERE << "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (_reply) {
        const int64_t clen = _reply->bodySize(method);
        debugs(20, 7, HERE << "clen: " << clen);
        if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
            return clen + _reply->hdr_sz;
    }

    return -1; // not enough information to predict
}
314
/// Discards all in-memory content and resets the low-memory watermark.
/// Must not be called while a swapout is in progress.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
323
/// \returns the lowest read offset among all attached store clients, or
/// endOffset()+1 when no client is reading below that seed — i.e. the
/// earliest byte some client may still need from memory
int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest (endOffset() + 1);

    for_each <LowestMemReader>(clients, lowest);

    return lowest.current;
}
333
/* XXX: This is wrong. It breaks *badly* on range combining */
/// Read-ahead policy check: allow reading more body bytes only while the
/// amount of body data in memory stays within readAheadGap of the slowest
/// client's current read position.
bool
MemObject::readAheadPolicyCanRead() const
{
    // body bytes in memory (excluding headers) vs. slowest reader + gap
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}
348
/// Registers a new store client with this object: bumps the client count
/// and links the client into the clients list.
void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}
355
#if URL_CHECKSUM_DEBUG
/// Debug-only: verifies that the URL has not been corrupted since
/// setUris() recorded its checksum; aborts on mismatch.
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
364
365 /*
366 * How much of the object data is on the disk?
367 */
368 int64_t
369 MemObject::objectBytesOnDisk() const
370 {
371 /*
372 * NOTE: storeOffset() represents the disk file size,
373 * not the amount of object data on disk.
374 *
375 * If we don't have at least 'swap_hdr_sz' bytes
376 * then none of the object data is on disk.
377 *
378 * This should still be safe if swap_hdr_sz == 0,
379 * meaning we haven't even opened the swapout file
380 * yet.
381 */
382
383 if (swapout.sio.getRaw() == NULL)
384 return 0;
385
386 int64_t nwritten = swapout.sio->offset();
387
388 if (nwritten <= (int64_t)swap_hdr_sz)
389 return 0;
390
391 return (nwritten - swap_hdr_sz);
392 }
393
/// Decides the lowest in-memory offset worth keeping.
/// \param swap whether the caller is trimming a swappable object
/// \returns the slowest reader's offset when memory must be released
///          (reader already past our data, memory budget exceeded, or a
///          swappable object with memory_cache_first off); otherwise the
///          current inmem_lo (keep everything we have)
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful.  lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
410
411 void
412 MemObject::trimSwappable()
413 {
414 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
415 /*
416 * We should only free up to what we know has been written
417 * to disk, not what has been queued for writing. Otherwise
418 * there will be a chunk of the data which is not in memory
419 * and is not yet on disk.
420 * The -1 makes sure the page isn't freed until storeSwapOut has
421 * walked to the next page.
422 */
423 int64_t on_disk;
424
425 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
426 new_mem_lo = on_disk - 1;
427
428 if (new_mem_lo == -1)
429 new_mem_lo = 0; /* the above might become -1 */
430
431 data_hdr.freeDataUpto(new_mem_lo);
432
433 inmem_lo = new_mem_lo;
434 }
435
/// Frees in-memory data of a non-swappable object up to the policy-chosen
/// offset; a zero policy result means nothing should be trimmed now.
void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert (new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}
445
/// whether the in-memory data forms one contiguous run from inmem_lo to
/// endOffset() (i.e. no holes left by trimming or out-of-order writes)
bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}
454
/// How many bytes we may read now, honoring delay pools unless told not to.
/// \param max upper bound on the answer
/// \param ignoreDelayPools when true, skip delay-pool accounting entirely
/// \returns up to max bytes; with delay pools enabled and honored, limited
///          by the most permissive client's delay-pool allowance
int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed ();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}
468
469 void
470 MemObject::setNoDelay(bool const newValue)
471 {
472 #if USE_DELAY_POOLS
473
474 for (dlink_node *node = clients.head; node; node = node->next) {
475 store_client *sc = (store_client *) node->data;
476 sc->delayId.setNoDelay(newValue);
477 }
478
479 #endif
480 }
481
/// Queues a read to be retried later (e.g. when more data or delay-pool
/// budget becomes available); kicked by kickReads().
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
487
/// Resumes all deferred reads (-1 means no limit on how many to kick).
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
493
#if USE_DELAY_POOLS
/// Scans all attached store clients and returns the DelayId with the
/// largest current byte allowance (the least restrictive delay pool).
/// \returns a default DelayId when there are no clients
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1; // largest allowance seen so far
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = static_cast<store_client *>(node->data);
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        // bytes this client's delay pool would currently permit
        const int j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
530
/// \returns the number of in-memory bytes not yet queued for swapout
/// (in-memory high-water mark minus the swapout queue offset)
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}