]>
git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
/*
 * DEBUG: section 19    Store Memory Primitives
 * AUTHOR: Robert Collins
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details.  Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details.  Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details.  Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 */
35 #include "comm/Connection.h"
38 #include "HttpReply.h"
39 #include "HttpRequest.h"
41 #include "MemObject.h"
42 #include "profiler/Profiler.h"
43 #include "SquidConfig.h"
45 #include "StoreClient.h"
48 #include "DelayPools.h"
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/*
 * Debug-only helper: fold a URL's MD5 digest into a 32-bit checksum
 * (first sizeof(unsigned int) bytes of the digest).  Used to detect
 * URI corruption while a MemObject is alive.
 */
static unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
69 RemovalPolicy
* mem_policy
= NULL
;
72 MemObject::inUseCount()
74 return Pool().inUseCount();
78 MemObject::storeId() const {
79 if (!storeId_
.defined()) {
80 debugs(20, DBG_IMPORTANT
, "Bug: Missing MemObject::storeId value");
82 storeId_
= "[unknown_URI]";
84 return storeId_
.termedBuf();
88 MemObject::logUri() const {
89 return logUri_
.defined() ? logUri_
.termedBuf() : storeId();
93 MemObject::hasUris() const {
94 return storeId_
.defined();
98 MemObject::setUris(char const *aStoreId
, char const *aLogUri
, const HttpRequestMethod
&aMethod
)
102 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
103 if (!aLogUri
|| aLogUri
== aStoreId
)
104 logUri_
.clean(); // use storeId_ by default to minimize copying
110 #if URL_CHECKSUM_DEBUG
111 chksum
= url_checksum(urlXXX());
115 MemObject::MemObject(): smpCollapsed(false)
117 debugs(20, 3, HERE
<< "new MemObject " << this);
118 _reply
= new HttpReply
;
123 /* XXX account log_url */
125 swapout
.decision
= SwapOut::swNeedsCheck
;
128 MemObject::~MemObject()
130 debugs(20, 3, HERE
<< "del MemObject " << this);
131 const Ctx ctx
= ctx_enter(storeId_
.termedBuf()); /* XXX: need URI? */
133 #if URL_CHECKSUM_DEBUG
137 if (!shutting_down
) { // Store::Root() is FATALly missing during shutdown
138 // TODO: Consider moving these to destroyMemoryObject
139 if (xitTable
.index
>= 0)
140 Store::Root().transientsDisconnect(*this);
141 if (memCache
.index
>= 0)
142 Store::Root().memoryDisconnect(*this);
144 assert(xitTable
.index
< 0);
145 assert(memCache
.index
< 0);
146 assert(swapout
.sio
== NULL
);
149 data_hdr
.freeContent();
153 * There is no way to abort FD-less clients, so they might
154 * still have mem->clients set.
156 assert(clients
.head
== NULL
);
160 HTTPMSGUNLOCK(_reply
);
162 HTTPMSGUNLOCK(request
);
164 ctx_exit(ctx
); /* must exit before we free mem->url */
166 safe_free(vary_headers
);
170 MemObject::unlinkRequest()
172 HTTPMSGUNLOCK(request
);
176 MemObject::write(const StoreIOBuffer
&writeBuffer
)
178 PROF_start(MemObject_write
);
179 debugs(19, 6, "memWrite: offset " << writeBuffer
.offset
<< " len " << writeBuffer
.length
);
181 /* We don't separate out mime headers yet, so ensure that the first
182 * write is at offset 0 - where they start
184 assert (data_hdr
.endOffset() || writeBuffer
.offset
== 0);
186 assert (data_hdr
.write (writeBuffer
));
187 PROF_stop(MemObject_write
);
191 MemObject::dump() const
195 /* do we want this one? */
196 debugs(20, DBG_IMPORTANT
, "MemObject->data.origin_offset: " << (data_hdr
.head
? data_hdr
.head
->nodeBuffer
.offset
: 0));
199 debugs(20, DBG_IMPORTANT
, "MemObject->start_ping: " << start_ping
.tv_sec
<< "."<< std::setfill('0') << std::setw(6) << start_ping
.tv_usec
);
200 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_hi: " << data_hdr
.endOffset());
201 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_lo: " << inmem_lo
);
202 debugs(20, DBG_IMPORTANT
, "MemObject->nclients: " << nclients
);
203 debugs(20, DBG_IMPORTANT
, "MemObject->reply: " << _reply
);
204 debugs(20, DBG_IMPORTANT
, "MemObject->request: " << request
);
205 debugs(20, DBG_IMPORTANT
, "MemObject->logUri: " << logUri_
);
206 debugs(20, DBG_IMPORTANT
, "MemObject->storeId: " << storeId_
);
210 MemObject::getReply() const
216 MemObject::replaceHttpReply(HttpReply
*newrep
)
218 HTTPMSGUNLOCK(_reply
);
223 struct LowestMemReader
: public unary_function
<store_client
, void> {
224 LowestMemReader(int64_t seed
):current(seed
) {}
226 void operator() (store_client
const &x
) {
227 if (x
.memReaderHasLowerOffset(current
))
228 current
= x
.copyInto
.offset
;
234 struct StoreClientStats
: public unary_function
<store_client
, void> {
235 StoreClientStats(MemBuf
*anEntry
):where(anEntry
),index(0) {}
237 void operator()(store_client
const &x
) {
238 x
.dumpStats(where
, index
);
247 MemObject::stat(MemBuf
* mb
) const
249 mb
->Printf("\t%s %s\n",
250 RequestMethodStr(method
), logUri());
252 mb
->Printf("\tvary_headers: %s\n", vary_headers
);
253 mb
->Printf("\tinmem_lo: %" PRId64
"\n", inmem_lo
);
254 mb
->Printf("\tinmem_hi: %" PRId64
"\n", data_hdr
.endOffset());
255 mb
->Printf("\tswapout: %" PRId64
" bytes queued\n",
256 swapout
.queue_offset
);
258 if (swapout
.sio
.getRaw())
259 mb
->Printf("\tswapout: %" PRId64
" bytes written\n",
260 (int64_t) swapout
.sio
->offset());
262 StoreClientStats
statsVisitor(mb
);
264 for_each
<StoreClientStats
>(clients
, statsVisitor
);
268 MemObject::endOffset () const
270 return data_hdr
.endOffset();
274 MemObject::markEndOfReplyHeaders()
276 const int hdr_sz
= endOffset();
279 _reply
->hdr_sz
= hdr_sz
;
283 MemObject::size() const
292 MemObject::expectedReplySize() const
294 debugs(20, 7, HERE
<< "object_sz: " << object_sz
);
295 if (object_sz
>= 0) // complete() has been called; we know the exact answer
299 const int64_t clen
= _reply
->bodySize(method
);
300 debugs(20, 7, HERE
<< "clen: " << clen
);
301 if (clen
>= 0 && _reply
->hdr_sz
> 0) // yuck: HttpMsg sets hdr_sz to 0
302 return clen
+ _reply
->hdr_sz
;
305 return -1; // not enough information to predict
311 assert(swapout
.sio
== NULL
);
312 data_hdr
.freeContent();
314 /* Should we check for clients? */
318 MemObject::lowestMemReaderOffset() const
320 LowestMemReader
lowest (endOffset() + 1);
322 for_each
<LowestMemReader
>(clients
, lowest
);
324 return lowest
.current
;
327 /* XXX: This is wrong. It breaks *badly* on range combining */
329 MemObject::readAheadPolicyCanRead() const
331 const bool canRead
= endOffset() - getReply()->hdr_sz
<
332 lowestMemReaderOffset() + Config
.readAheadGap
;
335 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz
<<
336 " < " << lowestMemReaderOffset() << '+' << Config
.readAheadGap
);
343 MemObject::addClient(store_client
*aClient
)
346 dlinkAdd(aClient
, &aClient
->node
, &clients
);
#if URL_CHECKSUM_DEBUG
/* debug-only: verify the stored URI has not been corrupted */
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
359 * How much of the object data is on the disk?
362 MemObject::objectBytesOnDisk() const
365 * NOTE: storeOffset() represents the disk file size,
366 * not the amount of object data on disk.
368 * If we don't have at least 'swap_hdr_sz' bytes
369 * then none of the object data is on disk.
371 * This should still be safe if swap_hdr_sz == 0,
372 * meaning we haven't even opened the swapout file
376 if (swapout
.sio
.getRaw() == NULL
)
379 int64_t nwritten
= swapout
.sio
->offset();
381 if (nwritten
<= (int64_t)swap_hdr_sz
)
384 return (nwritten
- swap_hdr_sz
);
388 MemObject::policyLowestOffsetToKeep(bool swap
) const
391 * Careful. lowest_offset can be greater than endOffset(), such
392 * as in the case of a range request.
394 int64_t lowest_offset
= lowestMemReaderOffset();
396 if (endOffset() < lowest_offset
||
397 endOffset() - inmem_lo
> (int64_t)Config
.Store
.maxInMemObjSize
||
398 (swap
&& !Config
.onoff
.memory_cache_first
))
399 return lowest_offset
;
405 MemObject::trimSwappable()
407 int64_t new_mem_lo
= policyLowestOffsetToKeep(1);
409 * We should only free up to what we know has been written
410 * to disk, not what has been queued for writing. Otherwise
411 * there will be a chunk of the data which is not in memory
412 * and is not yet on disk.
413 * The -1 makes sure the page isn't freed until storeSwapOut has
414 * walked to the next page.
418 if ((on_disk
= objectBytesOnDisk()) - 1 < new_mem_lo
)
419 new_mem_lo
= on_disk
- 1;
421 if (new_mem_lo
== -1)
422 new_mem_lo
= 0; /* the above might become -1 */
424 data_hdr
.freeDataUpto(new_mem_lo
);
426 inmem_lo
= new_mem_lo
;
430 MemObject::trimUnSwappable()
432 if (const int64_t new_mem_lo
= policyLowestOffsetToKeep(false)) {
433 assert (new_mem_lo
> 0);
434 data_hdr
.freeDataUpto(new_mem_lo
);
435 inmem_lo
= new_mem_lo
;
436 } // else we should not trim anything at this time
440 MemObject::isContiguous() const
442 bool result
= data_hdr
.hasContigousContentRange (Range
<int64_t>(inmem_lo
, endOffset()));
443 /* XXX : make this higher level */
444 debugs (19, result
? 4 :3, "MemObject::isContiguous: Returning " << (result
? "true" : "false"));
449 MemObject::mostBytesWanted(int max
, bool ignoreDelayPools
) const
452 if (!ignoreDelayPools
) {
453 /* identify delay id with largest allowance */
454 DelayId largestAllowance
= mostBytesAllowed ();
455 return largestAllowance
.bytesWanted(0, max
);
463 MemObject::setNoDelay(bool const newValue
)
467 for (dlink_node
*node
= clients
.head
; node
; node
= node
->next
) {
468 store_client
*sc
= (store_client
*) node
->data
;
469 sc
->delayId
.setNoDelay(newValue
);
476 MemObject::delayRead(DeferredRead
const &aRead
)
478 deferredReads
.delayRead(aRead
);
482 MemObject::kickReads()
484 deferredReads
.kickReads(-1);
#if USE_DELAY_POOLS
/* the delay id of the client with the largest current byte allowance */
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
525 MemObject::availableForSwapOut() const
527 return endOffset() - swapout
.queue_offset
;