/*
 * src/MemObject.cc
 * Author: Steven Lawrance <squid@moonlightdesign.org>
 */
1
2 /*
3 * $Id$
4 *
5 * DEBUG: section 19 Store Memory Primitives
6 * AUTHOR: Robert Collins
7 *
8 * SQUID Web Proxy Cache http://www.squid-cache.org/
9 * ----------------------------------------------------------
10 *
11 * Squid is the result of efforts by numerous individuals from
12 * the Internet community; see the CONTRIBUTORS file for full
13 * details. Many organizations have provided support for Squid's
14 * development; see the SPONSORS file for full details. Squid is
15 * Copyrighted (C) 2001 by the Regents of the University of
16 * California; see the COPYRIGHT file for full details. Squid
17 * incorporates software developed and/or copyrighted by other
18 * sources; see the CREDITS file for full details.
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
33 *
34 */
35
36 #include "squid.h"
37 #include "MemObject.h"
38 #include "HttpRequest.h"
39 #include "HttpReply.h"
40 #include "Store.h"
41 #include "StoreClient.h"
42 #include "Generic.h"
43 #if USE_DELAY_POOLS
44 #include "DelayPools.h"
45 #endif
46 #include "MemBuf.h"
47
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Debug aid: derive a 32-bit checksum from the leading bytes of the
/// MD5 digest of a URL, used to detect corrupted url fields.
unsigned int
url_checksum(const char *url)
{
    static unsigned char digest[16];
    SquidMD5_CTX md5Context;
    unsigned int checksum;

    SquidMD5Init(&md5Context);
    SquidMD5Update(&md5Context, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &md5Context);

    memcpy(&checksum, digest, sizeof(checksum));
    return checksum;
}

#endif
65
66 RemovalPolicy * mem_policy = NULL;
67
68 size_t
69 MemObject::inUseCount()
70 {
71 return Pool().inUseCount();
72 }
73
74 MemObject::MemObject(char const *aUrl, char const *aLog_url)
75 {
76 debugs(20, 3, HERE << "new MemObject " << this);
77 HttpReply *rep = new HttpReply;
78
79 _reply = HTTPMSGLOCK(rep);
80 url = xstrdup(aUrl);
81
82 #if URL_CHECKSUM_DEBUG
83
84 chksum = url_checksum(url);
85
86 #endif
87
88 log_url = xstrdup(aLog_url);
89
90 object_sz = -1;
91
92 /* XXX account log_url */
93 }
94
/*
 * Tear down a MemObject: release the in-memory content, unlock the
 * reply and request messages, and free the owned strings.  The debug
 * context must be exited before url is freed, since ctx refers to it.
 */
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    const Ctx ctx = ctx_enter(url);
#if URL_CHECKSUM_DEBUG

    assert(chksum == url_checksum(url));
#endif

    /* during shutdown a still-pending swapout is tolerated */
    if (!shutting_down)
        assert(swapout.sio == NULL);

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx); /* must exit before we free mem->url */

    safe_free(url);

    safe_free(log_url); /* XXX account log_url */

    safe_free(vary_headers);
}
130
/// Release this object's reference to the associated request message.
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
136
137 void
138 MemObject::write ( StoreIOBuffer writeBuffer, STMCB *callback, void *callbackData)
139 {
140 PROF_start(MemObject_write);
141 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
142
143 /* the offset is into the content, not the headers */
144 writeBuffer.offset += (_reply ? _reply->hdr_sz : 0);
145
146 /* We don't separate out mime headers yet, so ensure that the first
147 * write is at offset 0 - where they start
148 */
149 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
150
151 assert (data_hdr.write (writeBuffer));
152 callback (callbackData, writeBuffer);
153 PROF_stop(MemObject_write);
154 }
155
/// Dump this object's state to the debug log at level 1.
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, 1, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, 1, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, 1, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, 1, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, 1, "MemObject->nclients: " << nclients);
    debugs(20, 1, "MemObject->reply: " << _reply);
    debugs(20, 1, "MemObject->request: " << request);
    debugs(20, 1, "MemObject->log_url: " << log_url << " " << checkNullString(log_url));
}
173
/// Read-only access to the stored HTTP reply (set at construction).
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
179
/// Replace the current reply with newrep, releasing the old reference
/// and taking a new one on the replacement.
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = HTTPMSGLOCK(newrep);
}
186
/// Visitor functor: tracks the lowest in-memory read offset among
/// the store clients it is applied to.  `current` is read by the
/// caller after iteration, so it is part of the interface.
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    // lower `current` to x's copy offset when x is a memory reader below it
    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;   // lowest offset seen so far
};
197
/// Visitor functor: appends per-client statistics to a MemBuf,
/// numbering the clients in visiting order.
struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index++);
    }

    MemBuf *where;   // output buffer (not owned)
    size_t index;    // running client number
};
208
/// Append a human-readable description of this object's state, plus
/// per-client statistics, to mb.
void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t%s %s\n",
               RequestMethodStr(method), log_url);
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %"PRId64"\n", inmem_lo);
    mb->Printf("\tinmem_hi: %"PRId64"\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %"PRId64" bytes queued\n",
               swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %"PRId64" bytes written\n",
                   (int64_t) swapout.sio->offset());

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
229
/// End offset of the data currently held in memory (headers + body).
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
235
236 int64_t
237 MemObject::size() const
238 {
239 if (object_sz < 0)
240 return endOffset();
241
242 return object_sz;
243 }
244
/// Discard all in-memory content and rewind inmem_lo; only legal when
/// no swapout is in progress.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
253
254
/*
 * Smallest offset any attached memory reader still needs.  Seeded
 * with endOffset()+1 so that, with no memory readers, the result is
 * just past the data we already have.
 */
int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest (endOffset() + 1);

    for_each <LowestMemReader>(clients, lowest);

    return lowest.current;
}
264
/* XXX: This is wrong. It breaks *badly* on range combining */
/// May we fetch more body data?  True while the in-memory body end
/// stays within Config.readAheadGap of the slowest memory reader.
bool
MemObject::readAheadPolicyCanRead() const
{
    return endOffset() - getReply()->hdr_sz < lowestMemReaderOffset() + Config.readAheadGap;
}
271
/// Register a new store client for this object and link it into the
/// clients list.
void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}
278
#if URL_CHECKSUM_DEBUG
/// Debug aid: abort if the url string no longer matches the checksum
/// recorded at construction (i.e. the field was corrupted).
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(url));
}

#endif
287
288 /*
289 * How much of the object data is on the disk?
290 */
291 int64_t
292 MemObject::objectBytesOnDisk() const
293 {
294 /*
295 * NOTE: storeOffset() represents the disk file size,
296 * not the amount of object data on disk.
297 *
298 * If we don't have at least 'swap_hdr_sz' bytes
299 * then none of the object data is on disk.
300 *
301 * This should still be safe if swap_hdr_sz == 0,
302 * meaning we haven't even opened the swapout file
303 * yet.
304 */
305
306 if (swapout.sio.getRaw() == NULL)
307 return 0;
308
309 int64_t nwritten = swapout.sio->offset();
310
311 if (nwritten <= (int64_t)swap_hdr_sz)
312 return 0;
313
314 return (nwritten - swap_hdr_sz);
315 }
316
/*
 * Lowest in-memory offset that retention policy requires us to keep.
 * Returns the slowest reader's offset when readers are behind the
 * data, the object exceeds the in-memory size limit, or (when `swap`
 * is set) memory_cache_first is disabled; otherwise keeps inmem_lo.
 */
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful.  lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
333
334 void
335 MemObject::trimSwappable()
336 {
337 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
338 /*
339 * We should only free up to what we know has been written
340 * to disk, not what has been queued for writing. Otherwise
341 * there will be a chunk of the data which is not in memory
342 * and is not yet on disk.
343 * The -1 makes sure the page isn't freed until storeSwapOut has
344 * walked to the next page. (mem->swapout.memnode)
345 */
346 int64_t on_disk;
347
348 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
349 new_mem_lo = on_disk - 1;
350
351 if (new_mem_lo == -1)
352 new_mem_lo = 0; /* the above might become -1 */
353
354 data_hdr.freeDataUpto(new_mem_lo);
355
356 inmem_lo = new_mem_lo;
357 }
358
359 void
360 MemObject::trimUnSwappable()
361 {
362 int64_t new_mem_lo = policyLowestOffsetToKeep(0);
363 assert (new_mem_lo > 0);
364
365 data_hdr.freeDataUpto(new_mem_lo);
366 inmem_lo = new_mem_lo;
367 }
368
369
/// Whether the in-memory data covers [inmem_lo, endOffset()) without
/// holes.  (The misspelled callee name is the mem_hdr API as declared.)
bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}
378
/// How many bytes (up to max) we may read right now.  With delay
/// pools enabled this is limited by the most-generous client
/// allowance; otherwise max is returned unchanged.
int
MemObject::mostBytesWanted(int max) const
{
#if USE_DELAY_POOLS
    /* identify delay id with largest allowance */
    DelayId largestAllowance = mostBytesAllowed ();
    return largestAllowance.bytesWanted(0, max);
#else

    return max;
#endif
}
391
392 void
393 MemObject::setNoDelay(bool const newValue)
394 {
395 #if USE_DELAY_POOLS
396
397 for (dlink_node *node = clients.head; node; node = node->next) {
398 store_client *sc = (store_client *) node->data;
399 sc->delayId.setNoDelay(newValue);
400 }
401
402 #endif
403 }
404
/// Queue a read to be retried later (see kickReads).
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
410
/// Resume all deferred reads (-1 means no limit on how many to kick).
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
416
#if USE_DELAY_POOLS
/*
 * Return the DelayId of the attached memory client currently allowed
 * the most bytes.  Disk clients are skipped; with no eligible client
 * a default-constructed DelayId is returned.
 */
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        if (sc->getType() != STORE_MEM_CLIENT)
            /* reading off disk */
            continue;

        /* declared at point of use instead of function scope */
        const int j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif