/*
 * $Id$
 *
 * DEBUG: section 19    Store Memory Primitives
 * AUTHOR: Robert Collins
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#include "comm/Connection.h"
#include "MemObject.h"
#include "HttpRequest.h"
#include "HttpReply.h"
#include "Store.h"
#include "StoreClient.h"
#include "Generic.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif
#include "MemBuf.h"

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
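/*
 * Debugging helper: folds the first sizeof(unsigned int) bytes of the
 * URL's MD5 digest into an integer. The constructor records this value
 * in chksum so later assertions can detect corruption of the stored URL.
 */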
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif

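/* removal policy for the in-memory object cache (memory_replacement_policy) */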
RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

void
MemObject::resetUrls(char const *aUrl, char const *aLog_url)
{
    safe_free(url);
    safe_free(log_url);    /* XXX account log_url */
    log_url = xstrdup(aLog_url);
    url = xstrdup(aUrl);
}

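/*
 * A new MemObject starts with an empty, locked HttpReply, private copies
 * of the request and log URLs, an unknown object size (object_sz == -1),
 * and a swapout decision that has not been made yet.
 */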
MemObject::MemObject(char const *aUrl, char const *aLog_url)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    HttpReply *rep = new HttpReply;

    _reply = HTTPMSGLOCK(rep);
    url = xstrdup(aUrl);

#if URL_CHECKSUM_DEBUG

    chksum = url_checksum(url);

#endif

    log_url = xstrdup(aLog_url);

    object_sz = -1;

    /* XXX account log_url */

    swapout.decision = SwapOut::swNeedsCheck;
}

MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(url);
#if URL_CHECKSUM_DEBUG

    assert(chksum == url_checksum(url));
#endif

    if (!shutting_down)
        assert(swapout.sio == NULL);

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(url);

    safe_free(log_url);    /* XXX account log_url */

    safe_free(vary_headers);
}

void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}

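/*
 * Append writeBuffer to the in-memory data (data_hdr) and notify the
 * caller via callback. writeBuffer.offset is relative to the reply body;
 * it is shifted by the stored reply's header size because headers and
 * body share data_hdr. A caller might use it roughly like this (sketch;
 * myWriteDone is a caller-supplied STMCB function, not part of this file):
 *
 *     StoreIOBuffer chunk(len, bodyOffset, buf);
 *     mem_obj->write(chunk, myWriteDone, callerData);
 */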
void
MemObject::write ( StoreIOBuffer writeBuffer, STMCB *callback, void *callbackData)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* the offset is into the content, not the headers */
    writeBuffer.offset += (_reply ? _reply->hdr_sz : 0);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert (data_hdr.endOffset() || writeBuffer.offset == 0);

    assert (data_hdr.write (writeBuffer));
    callback (callbackData, writeBuffer);
    PROF_stop(MemObject_write);
}

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, 1, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, 1, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, 1, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, 1, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, 1, "MemObject->nclients: " << nclients);
    debugs(20, 1, "MemObject->reply: " << _reply);
    debugs(20, 1, "MemObject->request: " << request);
    debugs(20, 1, "MemObject->log_url: " << log_url << " " << checkNullString(log_url));
}

HttpReply const *
MemObject::getReply() const
{
    return _reply;
}

void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = HTTPMSGLOCK(newrep);
}

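/*
 * Function objects applied to every store_client on the clients list via
 * for_each(): LowestMemReader finds the smallest copy offset among
 * in-memory readers; StoreClientStats appends per-client statistics to a
 * MemBuf.
 */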
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index++);
    }

    MemBuf *where;
    size_t index;
};

void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr(method), log_url);
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %"PRId64"\n", inmem_lo);
    mb->Printf("\tinmem_hi: %"PRId64"\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %"PRId64" bytes queued\n",
               swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %"PRId64" bytes written\n",
                   (int64_t) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}

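/*
 * Called when the reply headers are complete: everything appended so far
 * is header bytes, so the current endOffset() becomes the stored reply's
 * hdr_sz.
 */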
void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}

int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

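/*
 * Best-effort prediction of the total stored size (headers plus body).
 * For example, with hdr_sz == 300 and a known body size of 1000 the
 * expected reply size is 1300; without a usable body size, or before the
 * headers are parsed, the answer is -1 (unknown).
 */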
int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, HERE << "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (_reply) {
        const int64_t clen = _reply->bodySize(method);
        debugs(20, 7, HERE << "clen: " << clen);
        if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
            return clen + _reply->hdr_sz;
    }

    return -1; // not enough information to predict
}

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}


int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest (endOffset() + 1);

    for_each <LowestMemReader>(clients, lowest);

    return lowest.current;
}

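/*
 * Read-ahead check: allow more server data to be read only while the
 * data buffered ahead of the slowest in-memory reader stays within
 * read_ahead_gap. Roughly, with a 16 KB gap, once buffered content runs
 * more than 16 KB past the slowest reader, further reads are deferred
 * until that reader catches up.
 */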
/* XXX: This is wrong. It breaks *badly* on range combining */
bool
MemObject::readAheadPolicyCanRead() const
{
    return endOffset() - getReply()->hdr_sz < lowestMemReaderOffset() + Config.readAheadGap;
}

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(url));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}

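/*
 * The lowest in-memory offset that trimming must keep. Normally this is
 * inmem_lo, but the slowest reader's offset is used instead when readers
 * are beyond endOffset() (e.g., a range request), when the in-memory span
 * exceeds maxInMemObjSize, or, while swapping out, when
 * memory_cache_first is disabled.
 */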
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(1);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}

void
MemObject::trimUnSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(0);
    assert (new_mem_lo > 0);

    data_hdr.freeDataUpto(new_mem_lo);
    inmem_lo = new_mem_lo;
}


bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

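/*
 * How many bytes the next read on behalf of this object's clients may
 * fetch: with delay pools enabled, the most generous client's current
 * allowance caps the caller-supplied max; otherwise max is returned
 * unchanged.
 */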
int
MemObject::mostBytesWanted(int max) const
{
#if USE_DELAY_POOLS
    /* identify delay id with largest allowance */
    DelayId largestAllowance = mostBytesAllowed ();
    return largestAllowance.bytesWanted(0, max);
#else

    return max;
#endif
}

void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

#if USE_DELAY_POOLS
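/*
 * Return the DelayId of the in-memory client with the largest current
 * allowance; clients reading from disk are skipped.
 */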
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        if (sc->getType() != STORE_MEM_CLIENT)
            /* reading off disk */
            continue;

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif