]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
Merged from trunk
[thirdparty/squid.git] / src / MemObject.cc
1
2 /*
3 * $Id: MemObject.cc,v 1.33 2008/01/20 08:54:28 amosjeffries Exp $
4 *
5 * DEBUG: section 19 Store Memory Primitives
6 * AUTHOR: Robert Collins
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "MemObject.h"
38 #include "HttpRequest.h"
39 #include "HttpReply.h"
40 #include "Store.h"
41 #include "StoreClient.h"
42 #include "Generic.h"
43 #if DELAY_POOLS
44 #include "DelayPools.h"
45 #endif
46 #include "MemBuf.h"
47
48 /* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
/*
 * Compute a 32-bit checksum of a URL string: the first sizeof(unsigned int)
 * bytes of the URL's MD5 digest. Used only under URL_CHECKSUM_DEBUG to
 * detect later corruption of a MemObject's stored URL.
 */
unsigned int
url_checksum(const char *url)
{
    unsigned int checksum;
    SquidMD5_CTX md5state;
    static unsigned char digest[16];
    SquidMD5Init(&md5state);
    SquidMD5Update(&md5state, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &md5state);
    xmemcpy(&checksum, digest, sizeof(checksum));
    return checksum;
}

#endif
65
66 RemovalPolicy * mem_policy = NULL;
67
/* Number of MemObject instances currently allocated, as reported by
 * the class allocation pool. */
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}
73
/*
 * Construct a MemObject for the given request URL and log URL.
 * Starts with an empty, reference-locked HttpReply and an unknown
 * object size (object_sz == -1; see MemObject::size()).
 */
MemObject::MemObject(char const *aUrl, char const *aLog_url)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    HttpReply *rep = new HttpReply;

    /* hold a reference-count lock on the reply until the destructor */
    _reply = HTTPMSGLOCK(rep);
    url = xstrdup(aUrl);

#if URL_CHECKSUM_DEBUG

    /* remember a checksum of the URL to detect later memory corruption */
    chksum = url_checksum(url);

#endif

    log_url = xstrdup(aLog_url);

    /* -1 means "total object size not yet known" */
    object_sz = -1;

    /* XXX account log_url */
}
94
/*
 * Destructor: releases everything this MemObject owns — the in-memory
 * data nodes, the reference-locked reply and request messages, and the
 * duplicated URL strings. Note the strict ordering: ctx_exit() must run
 * before url is freed.
 */
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(url);
#if URL_CHECKSUM_DEBUG

    /* the stored URL must still match the checksum taken at construction */
    assert(chksum == url_checksum(url));
#endif

    /* a swapout must not still be in progress unless Squid is shutting down */
    if (!shutting_down)
        assert(swapout.sio == NULL);

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    /* drop our reference counts on the reply and request messages */
    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(url);

    safe_free(log_url);    /* XXX account log_url */

    safe_free(vary_headers);
}
130
/* Drop this object's reference to its HttpRequest (if any). */
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
136
137 void
138 MemObject::write ( StoreIOBuffer writeBuffer, STMCB *callback, void *callbackData)
139 {
140 PROF_start(MemObject_write);
141 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
142
143 /* the offset is into the content, not the headers */
144 writeBuffer.offset += (_reply ? _reply->hdr_sz : 0);
145
146 /* We don't separate out mime headers yet, so ensure that the first
147 * write is at offset 0 - where they start
148 */
149 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
150
151 assert (data_hdr.write (writeBuffer));
152 callback (callbackData, writeBuffer);
153 PROF_stop(MemObject_write);
154 }
155
/* Emit a debug-log summary of this object's state (offsets, client
 * count, reply/request pointers, log URL). */
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, 1, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, 1, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, 1, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, 1, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, 1, "MemObject->nclients: " << nclients);
    debugs(20, 1, "MemObject->reply: " << _reply);
    debugs(20, 1, "MemObject->request: " << request);
    debugs(20, 1, "MemObject->log_url: " << log_url << " " << checkNullString(log_url));
}
173
/* Read-only accessor for the stored reply message. */
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
179
/* Swap in a new reply message: unlock (release) the old reply and take
 * a reference-count lock on newrep. */
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = HTTPMSGLOCK(newrep);
}
186
187 struct LowestMemReader : public unary_function<store_client, void> {
188 LowestMemReader(int64_t seed):current(seed) {}
189
190 void operator() (store_client const &x) {
191 if (x.memReaderHasLowerOffset(current))
192 current = x.copyInto.offset;
193 }
194
195 int64_t current;
196 };
197
198 struct StoreClientStats : public unary_function<store_client, void> {
199 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
200
201 void operator()(store_client const &x) {
202 x.dumpStats(where, index++);
203 }
204
205 MemBuf *where;
206 size_t index;
207 };
208
/* Append a human-readable description of this object to mb: method and
 * log URL, in-memory offsets, swapout progress, and one stats line per
 * attached store_client. NOTE(review): presumably consumed by cache
 * manager object reports — confirm against callers. */
void
MemObject::stat (MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr(method), log_url);
    mb->Printf("\tinmem_lo: %"PRId64"\n", inmem_lo);
    mb->Printf("\tinmem_hi: %"PRId64"\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %"PRId64" bytes queued\n",
               swapout.queue_offset);

    /* only report written bytes when a swapout file is actually open */
    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %"PRId64" bytes written\n",
                   (int64_t) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
227
/* Highest offset (reply headers + body bytes) currently stored in memory. */
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
233
234 int64_t
235 MemObject::size() const
236 {
237 if (object_sz < 0)
238 return endOffset();
239
240 return object_sz;
241 }
242
/* Discard all in-memory content and rewind to an empty state.
 * Must not be called while a swapout is in progress. */
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
251
252
253 int64_t
254 MemObject::lowestMemReaderOffset() const
255 {
256 LowestMemReader lowest (endOffset() + 1);
257
258 for_each <LowestMemReader>(clients, lowest);
259
260 return lowest.current;
261 }
262
263 /* XXX: This is wrong. It breaks *badly* on range combining */
/* Read-ahead gate: true while the amount of body data in memory is
 * still within Config.readAheadGap bytes of the slowest reader. */
bool
MemObject::readAheadPolicyCanRead() const
{
    return endOffset() - getReply()->hdr_sz < lowestMemReaderOffset() + Config.readAheadGap;
}
269
/* Register a new store_client as a reader of this object. */
void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}
276
#if URL_CHECKSUM_DEBUG
/* Debug aid: abort if the stored URL no longer matches the checksum
 * taken at construction time (i.e. the URL memory was corrupted). */
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(url));
}

#endif
285
286 /*
287 * How much of the object data is on the disk?
288 */
289 int64_t
290 MemObject::objectBytesOnDisk() const
291 {
292 /*
293 * NOTE: storeOffset() represents the disk file size,
294 * not the amount of object data on disk.
295 *
296 * If we don't have at least 'swap_hdr_sz' bytes
297 * then none of the object data is on disk.
298 *
299 * This should still be safe if swap_hdr_sz == 0,
300 * meaning we haven't even opened the swapout file
301 * yet.
302 */
303
304 if (swapout.sio.getRaw() == NULL)
305 return 0;
306
307 int64_t nwritten = swapout.sio->offset();
308
309 if (nwritten <= (int64_t)swap_hdr_sz)
310 return 0;
311
312 return (nwritten - swap_hdr_sz);
313 }
314
315 int64_t
316 MemObject::policyLowestOffsetToKeep() const
317 {
318 /*
319 * Careful. lowest_offset can be greater than endOffset(), such
320 * as in the case of a range request.
321 */
322 int64_t lowest_offset = lowestMemReaderOffset();
323
324 if (endOffset() < lowest_offset ||
325 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize)
326 return lowest_offset;
327
328 return inmem_lo;
329 }
330
331 void
332 MemObject::trimSwappable()
333 {
334 int64_t new_mem_lo = policyLowestOffsetToKeep();
335 /*
336 * We should only free up to what we know has been written
337 * to disk, not what has been queued for writing. Otherwise
338 * there will be a chunk of the data which is not in memory
339 * and is not yet on disk.
340 * The -1 makes sure the page isn't freed until storeSwapOut has
341 * walked to the next page. (mem->swapout.memnode)
342 */
343 int64_t on_disk;
344
345 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
346 new_mem_lo = on_disk - 1;
347
348 if (new_mem_lo == -1)
349 new_mem_lo = 0; /* the above might become -1 */
350
351 data_hdr.freeDataUpto(new_mem_lo);
352
353 inmem_lo = new_mem_lo;
354 }
355
/* Release in-memory data below the policy-determined low offset for an
 * object that is not being swapped to disk. */
void
MemObject::trimUnSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep();
    assert (new_mem_lo > 0);

    data_hdr.freeDataUpto(new_mem_lo);
    inmem_lo = new_mem_lo;
}
365
366
/* True when the in-memory data covers the range [inmem_lo, endOffset())
 * without any gaps. */
bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}
375
/* Upper bound on how many bytes to read now, capped at max. With delay
 * pools compiled in, limited by the most generous client allowance;
 * otherwise simply max. */
int
MemObject::mostBytesWanted(int max) const
{
#if DELAY_POOLS
    /* identify delay id with largest allowance */
    DelayId largestAllowance = mostBytesAllowed ();
    return largestAllowance.bytesWanted(0, max);
#else

    return max;
#endif
}
388
/* Propagate the no-delay flag to every attached client's delay id
 * (no-op unless delay pools are compiled in). */
void
MemObject::setNoDelay(bool const newValue)
{
#if DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}
401
/* Queue aRead to be retried later via kickReads(). */
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
407
/* Resume previously-delayed reads. NOTE(review): -1 appears to mean
 * "no limit on how many" — confirm against DeferredReadManager. */
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
413
#if DELAY_POOLS
/* Return the DelayId with the largest byte allowance among clients
 * currently reading from memory (disk readers are skipped). Used by
 * mostBytesWanted() to size reads. */
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        if (sc->getType() != STORE_MEM_CLIENT)
            /* reading off disk */
            continue;

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        /* keep the most generous allowance seen so far */
        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif