]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
Merged from parent (large-rock r12530 including trunk r12732; v3.3.3+).
[thirdparty/squid.git] / src / MemObject.cc
1
2 /*
3 * DEBUG: section 19 Store Memory Primitives
4 * AUTHOR: Robert Collins
5 *
6 * SQUID Web Proxy Cache http://www.squid-cache.org/
7 * ----------------------------------------------------------
8 *
9 * Squid is the result of efforts by numerous individuals from
10 * the Internet community; see the CONTRIBUTORS file for full
11 * details. Many organizations have provided support for Squid's
12 * development; see the SPONSORS file for full details. Squid is
13 * Copyrighted (C) 2001 by the Regents of the University of
14 * California; see the COPYRIGHT file for full details. Squid
15 * incorporates software developed and/or copyrighted by other
16 * sources; see the CREDITS file for full details.
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
31 *
32 */
33
34 #include "squid.h"
35 #include "comm/Connection.h"
36 #include "Generic.h"
37 #include "globals.h"
38 #include "HttpReply.h"
39 #include "HttpRequest.h"
40 #include "MemBuf.h"
41 #include "MemObject.h"
42 #include "profiler/Profiler.h"
43 #include "SquidConfig.h"
44 #include "Store.h"
45 #include "StoreClient.h"
46
47 #if USE_DELAY_POOLS
48 #include "DelayPools.h"
49 #endif
50
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
/// Returns the first sizeof(unsigned int) bytes of the MD5 digest of url.
/// Used only in URL_CHECKSUM_DEBUG builds to detect in-memory corruption
/// of a MemObject's stored URL (see MemObject::checkUrlChecksum()).
unsigned int
url_checksum(const char *url)
{
unsigned int ck;
SquidMD5_CTX M;
static unsigned char digest[16]; // scratch buffer; MD5 digest is 16 bytes
SquidMD5Init(&M);
SquidMD5Update(&M, (unsigned char *) url, strlen(url));
SquidMD5Final(digest, &M);
memcpy(&ck, digest, sizeof(ck));
return ck;
}

#endif
68
/// removal policy for the shared in-memory object cache
RemovalPolicy * mem_policy = NULL;
70
/// Number of MemObject instances currently allocated from the memory pool.
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}
76
77 void
78 MemObject::resetUrls(char const *aUrl, char const *aLog_url)
79 {
80 safe_free(url);
81 safe_free(log_url); /* XXX account log_url */
82 log_url = xstrdup(aLog_url);
83 url = xstrdup(aUrl);
84 }
85
86 MemObject::MemObject(char const *aUrl, char const *aLog_url)
87 {
88 debugs(20, 3, HERE << "new MemObject " << this);
89 _reply = new HttpReply;
90 HTTPMSGLOCK(_reply);
91
92 url = xstrdup(aUrl);
93
94 #if URL_CHECKSUM_DEBUG
95
96 chksum = url_checksum(url);
97
98 #endif
99
100 log_url = xstrdup(aLog_url);
101
102 object_sz = -1;
103
104 /* XXX account log_url */
105
106 swapout.decision = SwapOut::swNeedsCheck;
107 }
108
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    // enter a debugging context so messages below are tagged with our URL
    const Ctx ctx = ctx_enter(url);
#if URL_CHECKSUM_DEBUG

    assert(chksum == url_checksum(url));
#endif

    // in normal operation the swapout must have completed (or never started)
    // by destruction time; during shutdown objects may be torn down early
    if (!shutting_down)
        assert(swapout.sio == NULL);

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(url);

    safe_free(log_url);         /* XXX account log_url */

    safe_free(vary_headers);
}
144
/// Releases this object's reference to the client request, if any.
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
150
151 void
152 MemObject::write ( StoreIOBuffer writeBuffer, STMCB *callback, void *callbackData)
153 {
154 PROF_start(MemObject_write);
155 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
156
157 /* the offset is into the content, not the headers */
158 writeBuffer.offset += (_reply ? _reply->hdr_sz : 0);
159
160 /* We don't separate out mime headers yet, so ensure that the first
161 * write is at offset 0 - where they start
162 */
163 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
164
165 assert (data_hdr.write (writeBuffer));
166 callback (callbackData, writeBuffer);
167 PROF_stop(MemObject_write);
168 }
169
/// Dumps this object's state to cache.log at the "important" debug level.
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->log_url: " << checkNullString(log_url));
}
187
/// Read-only access to the reply message (set in the constructor, so
/// never nil after construction).
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
193
/// Replaces the current reply with newrep, adjusting reference counts.
/// NOTE(review): assumes newrep is not the currently stored reply --
/// passing the same pointer would unlock (and possibly destroy) it before
/// re-locking; confirm callers never do that.
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}
201
202 struct LowestMemReader : public unary_function<store_client, void> {
203 LowestMemReader(int64_t seed):current(seed) {}
204
205 void operator() (store_client const &x) {
206 if (x.memReaderHasLowerOffset(current))
207 current = x.copyInto.offset;
208 }
209
210 int64_t current;
211 };
212
213 struct StoreClientStats : public unary_function<store_client, void> {
214 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
215
216 void operator()(store_client const &x) {
217 x.dumpStats(where, index);
218 ++index;
219 }
220
221 MemBuf *where;
222 size_t index;
223 };
224
/// Appends a human-readable description of this object to mb
/// (used by the cache manager store-entry report).
void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr(method), log_url);
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->Printf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %" PRId64 " bytes queued\n",
               swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %" PRId64 " bytes written\n",
                   (int64_t) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
245
246 int64_t
247 MemObject::endOffset () const
248 {
249 return data_hdr.endOffset();
250 }
251
252 void
253 MemObject::markEndOfReplyHeaders()
254 {
255 const int hdr_sz = endOffset();
256 assert(hdr_sz >= 0);
257 assert(_reply);
258 _reply->hdr_sz = hdr_sz;
259 }
260
261 int64_t
262 MemObject::size() const
263 {
264 if (object_sz < 0)
265 return endOffset();
266
267 return object_sz;
268 }
269
270 int64_t
271 MemObject::expectedReplySize() const
272 {
273 debugs(20, 7, HERE << "object_sz: " << object_sz);
274 if (object_sz >= 0) // complete() has been called; we know the exact answer
275 return object_sz;
276
277 if (_reply) {
278 const int64_t clen = _reply->bodySize(method);
279 debugs(20, 7, HERE << "clen: " << clen);
280 if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
281 return clen + _reply->hdr_sz;
282 }
283
284 return -1; // not enough information to predict
285 }
286
/// Discards all in-memory data, returning the object to an empty state.
/// Only legal while no swapout is in progress.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
295
296 int64_t
297 MemObject::lowestMemReaderOffset() const
298 {
299 LowestMemReader lowest (endOffset() + 1);
300
301 for_each <LowestMemReader>(clients, lowest);
302
303 return lowest.current;
304 }
305
306 /* XXX: This is wrong. It breaks *badly* on range combining */
307 bool
308 MemObject::readAheadPolicyCanRead() const
309 {
310 return endOffset() - getReply()->hdr_sz < lowestMemReaderOffset() + Config.readAheadGap;
311 }
312
313 void
314 MemObject::addClient(store_client *aClient)
315 {
316 ++nclients;
317 dlinkAdd(aClient, &aClient->node, &clients);
318 }
319
#if URL_CHECKSUM_DEBUG
/// Asserts that the stored URL has not been corrupted since construction
/// (compares against the checksum captured in the constructor).
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(url));
}

#endif
328
329 /*
330 * How much of the object data is on the disk?
331 */
332 int64_t
333 MemObject::objectBytesOnDisk() const
334 {
335 /*
336 * NOTE: storeOffset() represents the disk file size,
337 * not the amount of object data on disk.
338 *
339 * If we don't have at least 'swap_hdr_sz' bytes
340 * then none of the object data is on disk.
341 *
342 * This should still be safe if swap_hdr_sz == 0,
343 * meaning we haven't even opened the swapout file
344 * yet.
345 */
346
347 if (swapout.sio.getRaw() == NULL)
348 return 0;
349
350 int64_t nwritten = swapout.sio->offset();
351
352 if (nwritten <= (int64_t)swap_hdr_sz)
353 return 0;
354
355 return (nwritten - swap_hdr_sz);
356 }
357
358 int64_t
359 MemObject::policyLowestOffsetToKeep(bool swap) const
360 {
361 /*
362 * Careful. lowest_offset can be greater than endOffset(), such
363 * as in the case of a range request.
364 */
365 int64_t lowest_offset = lowestMemReaderOffset();
366
367 if (endOffset() < lowest_offset ||
368 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
369 (swap && !Config.onoff.memory_cache_first))
370 return lowest_offset;
371
372 return inmem_lo;
373 }
374
375 void
376 MemObject::trimSwappable()
377 {
378 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
379 /*
380 * We should only free up to what we know has been written
381 * to disk, not what has been queued for writing. Otherwise
382 * there will be a chunk of the data which is not in memory
383 * and is not yet on disk.
384 * The -1 makes sure the page isn't freed until storeSwapOut has
385 * walked to the next page.
386 */
387 int64_t on_disk;
388
389 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
390 new_mem_lo = on_disk - 1;
391
392 if (new_mem_lo == -1)
393 new_mem_lo = 0; /* the above might become -1 */
394
395 data_hdr.freeDataUpto(new_mem_lo);
396
397 inmem_lo = new_mem_lo;
398 }
399
400 void
401 MemObject::trimUnSwappable()
402 {
403 int64_t new_mem_lo = policyLowestOffsetToKeep(0);
404 assert (new_mem_lo > 0);
405
406 data_hdr.freeDataUpto(new_mem_lo);
407 inmem_lo = new_mem_lo;
408 }
409
410 bool
411 MemObject::isContiguous() const
412 {
413 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
414 /* XXX : make this higher level */
415 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
416 return result;
417 }
418
419 int
420 MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
421 {
422 #if USE_DELAY_POOLS
423 if (!ignoreDelayPools) {
424 /* identify delay id with largest allowance */
425 DelayId largestAllowance = mostBytesAllowed ();
426 return largestAllowance.bytesWanted(0, max);
427 }
428 #endif
429
430 return max;
431 }
432
433 void
434 MemObject::setNoDelay(bool const newValue)
435 {
436 #if USE_DELAY_POOLS
437
438 for (dlink_node *node = clients.head; node; node = node->next) {
439 store_client *sc = (store_client *) node->data;
440 sc->delayId.setNoDelay(newValue);
441 }
442
443 #endif
444 }
445
/// Queues aRead until kickReads() releases the deferred reads.
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
451
/// Schedules all currently deferred reads for delivery.
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
457
#if USE_DELAY_POOLS
/// The DelayId of the attached client currently allowed the most bytes;
/// a default-constructed DelayId when there are no clients.
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        const int j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
494
495 int64_t
496 MemObject::availableForSwapOut() const
497 {
498 return endOffset() - swapout.queue_offset;
499 }