/*
 * $Id: MemObject.cc,v 1.24 2006/09/20 00:59:26 adrian Exp $
 *
 * DEBUG: section 19    Store Memory Primitives
 * AUTHOR: Robert Collins
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.   Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "MemObject.h"
#include "HttpRequest.h"
#include "HttpReply.h"
#include "Store.h"
#include "StoreClient.h"
#include "Generic.h"
#if DELAY_POOLS
#include "DelayPools.h"
#endif
#include "MemBuf.h"

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    MD5_CTX M;
    static unsigned char digest[16];
    MD5Init(&M);
    MD5Update(&M, (unsigned char *) url, strlen(url));
    MD5Final(digest, &M);
    xmemcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif

RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

MemObject::MemObject(char const *aUrl, char const *aLog_url)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    HttpReply *rep = new HttpReply;

    _reply = HTTPMSGLOCK(rep);
    url = xstrdup(aUrl);

#if URL_CHECKSUM_DEBUG

    chksum = url_checksum(url);

#endif

    log_url = xstrdup(aLog_url);

    object_sz = -1;

    /* XXX account log_url */
}

MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(url);
#if URL_CHECKSUM_DEBUG

    assert(chksum == url_checksum(url));
#endif

    if (!shutting_down)
        assert(swapout.sio == NULL);

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(url);

    safe_free(log_url);         /* XXX account log_url */

    safe_free(vary_headers);
}

void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}

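/*
 * Copy a block of object data into the in-memory header (data_hdr).
 * The caller supplies a content-relative offset, which is shifted by
 * the reply header size before storing; the callback is invoked once
 * the buffer has been written into data_hdr.
 */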
void
MemObject::write ( StoreIOBuffer writeBuffer, STMCB *callback, void *callbackData)
{
    PROF_start(MemObject_write);
    debug(19, 6) ("memWrite: offset %lu len %ld\n", (unsigned long)writeBuffer.offset, (long)writeBuffer.length);

    /* the offset is into the content, not the headers */
    writeBuffer.offset += (_reply ? _reply->hdr_sz : 0);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert (data_hdr.endOffset() || writeBuffer.offset == 0);

    assert (data_hdr.write (writeBuffer));
    callback (callbackData, writeBuffer);
    PROF_stop(MemObject_write);
}

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debug(20, 1) ("MemObject->data.origin_offset: %d\n",
                  data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0);
#endif

    debug(20, 1) ("MemObject->start_ping: %d.%06d\n",
                  (int) start_ping.tv_sec,
                  (int) start_ping.tv_usec);
    debug(20, 1) ("MemObject->inmem_hi: %d\n",
                  (int) data_hdr.endOffset());
    debug(20, 1) ("MemObject->inmem_lo: %d\n",
                  (int) inmem_lo);
    debug(20, 1) ("MemObject->nclients: %d\n",
                  nclients);
    debug(20, 1) ("MemObject->reply: %p\n",
                  _reply);
    debug(20, 1) ("MemObject->request: %p\n",
                  request);
    debug(20, 1) ("MemObject->log_url: %p %s\n",
                  log_url,
                  checkNullString(log_url));
}

HttpReply const *
MemObject::getReply() const
{
    return _reply;
}

void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = HTTPMSGLOCK(newrep);
}

struct LowestMemReader : public unary_function<store_client, void>
{
    LowestMemReader(off_t seed):current(seed){}

    void operator() (store_client const &x)
    {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    off_t current;
};

struct StoreClientStats : public unary_function<store_client, void>
{
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0){}

    void operator()(store_client const &x)
    {
        x.dumpStats(where, index++);
    }

    MemBuf *where;
    size_t index;
};

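/* Append cache-manager statistics for this object to the given MemBuf */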
void
MemObject::stat (MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr[method], log_url);
    mb->Printf("\tinmem_lo: %d\n", (int) inmem_lo);
    mb->Printf("\tinmem_hi: %d\n", (int) data_hdr.endOffset());
    mb->Printf("\tswapout: %d bytes queued\n",
               (int) swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %d bytes written\n",
                   (int) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

off_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}

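/*
 * Return the full object size once it is known (object_sz >= 0);
 * until then, report the current end of the in-memory data.
 */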
size_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}

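/*
 * Return the smallest copy offset among clients still reading from
 * memory. The seed of endOffset() + 1 means that, with no such
 * clients, the result points just past the current in-memory data.
 */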
off_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest (endOffset() + 1);

    for_each <LowestMemReader>(clients, lowest);

    return lowest.current;
}

/* XXX: This is wrong. It breaks *badly* on range combining */
bool
MemObject::readAheadPolicyCanRead() const
{
    return (size_t)endOffset() - getReply()->hdr_sz < lowestMemReaderOffset() + (Config.readAheadGap << 10);
}

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(url));
}

#endif

/*
 * How much of the object data is on the disk?
 */
size_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    off_t nwritten = swapout.sio->offset();

    if (nwritten <= (off_t)swap_hdr_sz)
        return 0;

    return (size_t) (nwritten - swap_hdr_sz);
}

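/*
 * Decide how much of the object's head may be freed from memory.
 * Normally everything from inmem_lo up is kept, but if all readers are
 * already past the in-memory data, or the in-memory window has grown
 * beyond maxInMemObjSize, trimming up to the slowest reader is allowed
 * instead.
 */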
off_t
MemObject::policyLowestOffsetToKeep() const
{
    /*
     * Careful.  lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    off_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (ssize_t)Config.Store.maxInMemObjSize)
        return lowest_offset;

    return inmem_lo;
}

void
MemObject::trimSwappable()
{
    off_t new_mem_lo = policyLowestOffsetToKeep();
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing.  Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page. (mem->swapout.memnode)
     */
    off_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0;         /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}

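/*
 * Free in-memory data below the policy limit for an object that is not
 * being swapped out; unlike trimSwappable(), there is no on-disk copy
 * to constrain how far we may trim.
 */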
void
MemObject::trimUnSwappable()
{
    off_t new_mem_lo = policyLowestOffsetToKeep();
    assert (new_mem_lo > 0);

    data_hdr.freeDataUpto(new_mem_lo);
    inmem_lo = new_mem_lo;
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<size_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debug (19, result ? 4 :3) ("MemObject::isContiguous: Returning %s\n",
                               result ? "true" : "false");
    return result;
}

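/*
 * With delay pools enabled, cap the next read at what the most generous
 * client delay id will allow; otherwise the caller's maximum stands.
 */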
int
MemObject::mostBytesWanted(int max) const
{
#if DELAY_POOLS
    /* identify delay id with largest allowance */
    DelayId largestAllowance = mostBytesAllowed ();
    return largestAllowance.bytesWanted(0, max);
#else

    return max;
#endif
}

void
MemObject::setNoDelay(bool const newValue)
{
#if DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

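/*
 * Deferred reads: delayRead() parks a read until it can make progress,
 * and kickReads() re-dispatches everything queued so far.
 */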
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

#if DELAY_POOLS
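/*
 * Scan the in-memory store clients and return the delay id that
 * currently permits the largest read.
 */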
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        if (sc->getType() != STORE_MEM_CLIENT)
            /* reading off disk */
            continue;

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif