/* src/MemObject.cc — gitweb page residue ("git.ipfire.org … Improved debugging") removed */
2 /*
3 * DEBUG: section 19 Store Memory Primitives
4 * AUTHOR: Robert Collins
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "comm/Connection.h"
36 #include "Generic.h"
37 #include "globals.h"
38 #include "HttpReply.h"
39 #include "HttpRequest.h"
40 #include "MemBuf.h"
41 #include "MemObject.h"
42 #include "profiler/Profiler.h"
43 #include "SquidConfig.h"
44 #include "Store.h"
45 #include "StoreClient.h"
46
47 #if USE_DELAY_POOLS
48 #include "DelayPools.h"
49 #endif
50
51 /* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Reduces a URL to a 32-bit checksum taken from the leading bytes of its
/// MD5 digest. Debug-build helper for detecting URI memory corruption.
unsigned int
url_checksum(const char *url)
{
    static unsigned char digest[16];
    SquidMD5_CTX md5state;
    unsigned int sum;

    SquidMD5Init(&md5state);
    SquidMD5Update(&md5state, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &md5state);
    memcpy(&sum, digest, sizeof(sum));
    return sum;
}

#endif
68
69 RemovalPolicy * mem_policy = NULL;
70
/// \returns the number of MemObject instances currently allocated
/// from the class-wide memory pool
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}
76
77 const char *
78 MemObject::storeId() const {
79 if (!storeId_.defined()) {
80 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
81 dump();
82 storeId_ = "[unknown_URI]";
83 }
84 return storeId_.termedBuf();
85 }
86
87 const char *
88 MemObject::logUri() const {
89 return logUri_.defined() ? logUri_.termedBuf() : storeId();
90 }
91
/// whether setUris() has populated this object; storeId_ doubles as the
/// indicator because logUri_ is optional and defaults to storeId_
bool
MemObject::hasUris() const {
    return storeId_.defined();
}
96
97 void
98 MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
99 {
100 storeId_ = aStoreId;
101
102 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
103 if (!aLogUri || aLogUri == aStoreId)
104 logUri_.clean(); // use storeId_ by default to minimize copying
105 else
106 logUri_ = aLogUri;
107
108 method = aMethod;
109
110 #if URL_CHECKSUM_DEBUG
111 chksum = url_checksum(urlXXX());
112 #endif
113 }
114
MemObject::MemObject(): smpCollapsed(false)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    // every MemObject carries a (possibly still empty) locked reply message
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply);

    // -1 means "size unknown"; stays negative until complete() learns it
    object_sz = -1;

    /* XXX account log_url */

    // swap-out eligibility has not been evaluated yet
    swapout.decision = SwapOut::swNeedsCheck;
}
127
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(storeId_.termedBuf()); /* XXX: need URI? */

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        // TODO: Consider moving these to destroyMemoryObject
        // detach from the shared transients and memory-cache tables first ...
        if (xitTable.index >= 0)
            Store::Root().transientsDisconnect(*this);
        if (memCache.index >= 0)
            Store::Root().memoryDisconnect(*this);

        // ... and verify the disconnect calls reset the table indexes
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx); /* must exit before we free mem->url */

    safe_free(vary_headers);
}
168
/// drops (and unlocks) our reference to the client request, if any
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
174
175 void
176 MemObject::write(const StoreIOBuffer &writeBuffer)
177 {
178 PROF_start(MemObject_write);
179 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
180
181 /* We don't separate out mime headers yet, so ensure that the first
182 * write is at offset 0 - where they start
183 */
184 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
185
186 assert (data_hdr.write (writeBuffer));
187 PROF_stop(MemObject_write);
188 }
189
/// dumps the object's key state to cache.log at the IMPORTANT level;
/// used when reporting bugs such as a missing store ID
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
208
/// \returns the current (locked) reply message; never nil after construction
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
214
/// replaces the current reply with newrep, adjusting reference locks;
/// the previous reply is unlocked (and possibly destroyed) first
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}
222
/// for_each visitor: finds the lowest read offset among this object's
/// store clients; the caller seeds it with an upper bound
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current; ///< lowest client read offset seen so far
};
233
/// for_each visitor: appends per-client statistics to a MemBuf,
/// numbering clients in visitation order
struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where; ///< destination buffer for the statistics report
    size_t index;  ///< sequence number of the next client to report
};
245
/// appends a human-readable state summary (method/URI, memory range,
/// swap-out progress, per-client stats) to mb for cache manager reports
void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr(method), logUri());
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->Printf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %" PRId64 " bytes queued\n",
               swapout.queue_offset);

    // bytes actually written so far, available only once swap-out started
    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %" PRId64 " bytes written\n",
                   (int64_t) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
266
/// \returns the offset just past the last byte of data currently in memory
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
272
/// records everything accumulated so far as the reply header size;
/// must be called exactly when header parsing ends
void
MemObject::markEndOfReplyHeaders()
{
    // NOTE(review): endOffset() is int64_t but hdr_sz is int; headers are
    // presumably small enough for the narrowing to be safe — confirm
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}
281
282 int64_t
283 MemObject::size() const
284 {
285 if (object_sz < 0)
286 return endOffset();
287
288 return object_sz;
289 }
290
291 int64_t
292 MemObject::expectedReplySize() const
293 {
294 debugs(20, 7, HERE << "object_sz: " << object_sz);
295 if (object_sz >= 0) // complete() has been called; we know the exact answer
296 return object_sz;
297
298 if (_reply) {
299 const int64_t clen = _reply->bodySize(method);
300 debugs(20, 7, HERE << "clen: " << clen);
301 if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
302 return clen + _reply->hdr_sz;
303 }
304
305 return -1; // not enough information to predict
306 }
307
/// discards all in-memory content, returning the object to an empty state;
/// only legal while no swap-out is in progress
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
316
317 int64_t
318 MemObject::lowestMemReaderOffset() const
319 {
320 LowestMemReader lowest (endOffset() + 1);
321
322 for_each <LowestMemReader>(clients, lowest);
323
324 return lowest.current;
325 }
326
327 /* XXX: This is wrong. It breaks *badly* on range combining */
328 bool
329 MemObject::readAheadPolicyCanRead() const
330 {
331 const bool canRead = endOffset() - getReply()->hdr_sz <
332 lowestMemReaderOffset() + Config.readAheadGap;
333
334 if (!canRead) {
335 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
336 " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
337 }
338
339 return canRead;
340 }
341
/// registers a new store client with this object and counts it
void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}
348
#if URL_CHECKSUM_DEBUG
/// asserts that the checksum recorded by setUris() still matches the
/// current URL; detects in-memory URI corruption in debug builds
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
357
358 /*
359 * How much of the object data is on the disk?
360 */
361 int64_t
362 MemObject::objectBytesOnDisk() const
363 {
364 /*
365 * NOTE: storeOffset() represents the disk file size,
366 * not the amount of object data on disk.
367 *
368 * If we don't have at least 'swap_hdr_sz' bytes
369 * then none of the object data is on disk.
370 *
371 * This should still be safe if swap_hdr_sz == 0,
372 * meaning we haven't even opened the swapout file
373 * yet.
374 */
375
376 if (swapout.sio.getRaw() == NULL)
377 return 0;
378
379 int64_t nwritten = swapout.sio->offset();
380
381 if (nwritten <= (int64_t)swap_hdr_sz)
382 return 0;
383
384 return (nwritten - swap_hdr_sz);
385 }
386
387 int64_t
388 MemObject::policyLowestOffsetToKeep(bool swap) const
389 {
390 /*
391 * Careful. lowest_offset can be greater than endOffset(), such
392 * as in the case of a range request.
393 */
394 int64_t lowest_offset = lowestMemReaderOffset();
395
396 if (endOffset() < lowest_offset ||
397 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
398 (swap && !Config.onoff.memory_cache_first))
399 return lowest_offset;
400
401 return inmem_lo;
402 }
403
404 void
405 MemObject::trimSwappable()
406 {
407 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
408 /*
409 * We should only free up to what we know has been written
410 * to disk, not what has been queued for writing. Otherwise
411 * there will be a chunk of the data which is not in memory
412 * and is not yet on disk.
413 * The -1 makes sure the page isn't freed until storeSwapOut has
414 * walked to the next page.
415 */
416 int64_t on_disk;
417
418 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
419 new_mem_lo = on_disk - 1;
420
421 if (new_mem_lo == -1)
422 new_mem_lo = 0; /* the above might become -1 */
423
424 data_hdr.freeDataUpto(new_mem_lo);
425
426 inmem_lo = new_mem_lo;
427 }
428
429 void
430 MemObject::trimUnSwappable()
431 {
432 if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
433 assert (new_mem_lo > 0);
434 data_hdr.freeDataUpto(new_mem_lo);
435 inmem_lo = new_mem_lo;
436 } // else we should not trim anything at this time
437 }
438
439 bool
440 MemObject::isContiguous() const
441 {
442 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
443 /* XXX : make this higher level */
444 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
445 return result;
446 }
447
/// \returns how many bytes (up to max) may be read for this object now;
/// consults delay pools unless ignoreDelayPools is set or pools are off
int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed ();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}
461
/// propagates the no-delay (delay-pool exemption) flag to every
/// attached store client; a no-op when delay pools are compiled out
void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}
474
/// queues a read to be retried later (e.g., when delay pools permit)
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
480
/// resumes queued deferred reads; -1 presumably means "no limit on how
/// many to kick" — confirm against DeferredReadManager::kickReads()
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
486
#if USE_DELAY_POOLS
/// \returns the DelayId with the largest current byte allowance among
/// all store clients of this object (a default DelayId if there are none)
DelayId
MemObject::mostBytesAllowed() const
{
    int bestAllowance = -1;
    DelayId best;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        const int allowance = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (allowance > bestAllowance) {
            bestAllowance = allowance;
            best = sc->delayId;
        }
    }

    return best;
}

#endif
523
/// \returns the number of in-memory bytes not yet queued for swap-out
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}