/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */
12 #include "comm/Connection.h"
15 #include "HttpReply.h"
16 #include "HttpRequest.h"
18 #include "MemObject.h"
19 #include "profiler/Profiler.h"
20 #include "SquidConfig.h"
22 #include "StoreClient.h"
25 #include "DelayPools.h"
28 /* TODO: make this global or private */
29 #if URL_CHECKSUM_DEBUG
30 static unsigned int url_checksum(const char *url
);
32 url_checksum(const char *url
)
36 static unsigned char digest
[16];
38 SquidMD5Update(&M
, (unsigned char *) url
, strlen(url
));
39 SquidMD5Final(digest
, &M
);
40 memcpy(&ck
, digest
, sizeof(ck
));
46 RemovalPolicy
* mem_policy
= NULL
;
49 MemObject::inUseCount()
51 return Pool().inUseCount();
55 MemObject::storeId() const
57 if (!storeId_
.size()) {
58 debugs(20, DBG_IMPORTANT
, "Bug: Missing MemObject::storeId value");
60 storeId_
= "[unknown_URI]";
62 return storeId_
.termedBuf();
66 MemObject::logUri() const
68 return logUri_
.size() ? logUri_
.termedBuf() : storeId();
72 MemObject::hasUris() const
74 return storeId_
.size();
78 MemObject::setUris(char const *aStoreId
, char const *aLogUri
, const HttpRequestMethod
&aMethod
)
82 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
83 if (!aLogUri
|| aLogUri
== aStoreId
)
84 logUri_
.clean(); // use storeId_ by default to minimize copying
90 #if URL_CHECKSUM_DEBUG
91 chksum
= url_checksum(urlXXX());
95 MemObject::MemObject() :
100 ping_reply_callback(nullptr),
105 #if URL_CHECKSUM_DEBUG
108 vary_headers(nullptr)
110 debugs(20, 3, "new MemObject " << this);
111 memset(&start_ping
, 0, sizeof(start_ping
));
112 memset(&abort
, 0, sizeof(abort
));
113 _reply
= new HttpReply
;
117 MemObject::~MemObject()
119 debugs(20, 3, "del MemObject " << this);
120 const Ctx ctx
= ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");
122 #if URL_CHECKSUM_DEBUG
126 if (!shutting_down
) { // Store::Root() is FATALly missing during shutdown
127 assert(xitTable
.index
< 0);
128 assert(memCache
.index
< 0);
129 assert(swapout
.sio
== NULL
);
132 data_hdr
.freeContent();
136 * There is no way to abort FD-less clients, so they might
137 * still have mem->clients set.
139 assert(clients
.head
== NULL
);
143 HTTPMSGUNLOCK(_reply
);
145 HTTPMSGUNLOCK(request
);
147 ctx_exit(ctx
); /* must exit before we free mem->url */
151 MemObject::unlinkRequest()
153 HTTPMSGUNLOCK(request
);
157 MemObject::write(const StoreIOBuffer
&writeBuffer
)
159 PROF_start(MemObject_write
);
160 debugs(19, 6, "memWrite: offset " << writeBuffer
.offset
<< " len " << writeBuffer
.length
);
162 /* We don't separate out mime headers yet, so ensure that the first
163 * write is at offset 0 - where they start
165 assert (data_hdr
.endOffset() || writeBuffer
.offset
== 0);
167 assert (data_hdr
.write (writeBuffer
));
168 PROF_stop(MemObject_write
);
172 MemObject::dump() const
176 /* do we want this one? */
177 debugs(20, DBG_IMPORTANT
, "MemObject->data.origin_offset: " << (data_hdr
.head
? data_hdr
.head
->nodeBuffer
.offset
: 0));
180 debugs(20, DBG_IMPORTANT
, "MemObject->start_ping: " << start_ping
.tv_sec
<< "."<< std::setfill('0') << std::setw(6) << start_ping
.tv_usec
);
181 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_hi: " << data_hdr
.endOffset());
182 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_lo: " << inmem_lo
);
183 debugs(20, DBG_IMPORTANT
, "MemObject->nclients: " << nclients
);
184 debugs(20, DBG_IMPORTANT
, "MemObject->reply: " << _reply
);
185 debugs(20, DBG_IMPORTANT
, "MemObject->request: " << request
);
186 debugs(20, DBG_IMPORTANT
, "MemObject->logUri: " << logUri_
);
187 debugs(20, DBG_IMPORTANT
, "MemObject->storeId: " << storeId_
);
191 MemObject::getReply() const
197 MemObject::replaceHttpReply(HttpReply
*newrep
)
199 HTTPMSGUNLOCK(_reply
);
204 struct LowestMemReader
: public unary_function
<store_client
, void> {
205 LowestMemReader(int64_t seed
):current(seed
) {}
207 void operator() (store_client
const &x
) {
208 if (x
.memReaderHasLowerOffset(current
))
209 current
= x
.copyInto
.offset
;
215 struct StoreClientStats
: public unary_function
<store_client
, void> {
216 StoreClientStats(MemBuf
*anEntry
):where(anEntry
),index(0) {}
218 void operator()(store_client
const &x
) {
219 x
.dumpStats(where
, index
);
228 MemObject::stat(MemBuf
* mb
) const
230 mb
->appendf("\t" SQUIDSBUFPH
" %s\n", SQUIDSBUFPRINT(method
.image()), logUri());
231 if (!vary_headers
.isEmpty())
232 mb
->appendf("\tvary_headers: " SQUIDSBUFPH
"\n", SQUIDSBUFPRINT(vary_headers
));
233 mb
->appendf("\tinmem_lo: %" PRId64
"\n", inmem_lo
);
234 mb
->appendf("\tinmem_hi: %" PRId64
"\n", data_hdr
.endOffset());
235 mb
->appendf("\tswapout: %" PRId64
" bytes queued\n", swapout
.queue_offset
);
237 if (swapout
.sio
.getRaw())
238 mb
->appendf("\tswapout: %" PRId64
" bytes written\n", (int64_t) swapout
.sio
->offset());
240 if (xitTable
.index
>= 0)
241 mb
->appendf("\ttransient index: %d state: %d\n", xitTable
.index
, xitTable
.io
);
242 if (memCache
.index
>= 0)
243 mb
->appendf("\tmem-cache index: %d state: %d offset: %" PRId64
"\n", memCache
.index
, memCache
.io
, memCache
.offset
);
245 mb
->appendf("\tobject_sz: %" PRId64
"\n", object_sz
);
247 mb
->appendf("\tsmp-collapsed\n");
249 StoreClientStats
statsVisitor(mb
);
251 for_each
<StoreClientStats
>(clients
, statsVisitor
);
255 MemObject::endOffset () const
257 return data_hdr
.endOffset();
261 MemObject::markEndOfReplyHeaders()
263 const int hdr_sz
= endOffset();
266 _reply
->hdr_sz
= hdr_sz
;
270 MemObject::size() const
279 MemObject::expectedReplySize() const
281 debugs(20, 7, HERE
<< "object_sz: " << object_sz
);
282 if (object_sz
>= 0) // complete() has been called; we know the exact answer
286 const int64_t clen
= _reply
->bodySize(method
);
287 debugs(20, 7, HERE
<< "clen: " << clen
);
288 if (clen
>= 0 && _reply
->hdr_sz
> 0) // yuck: Http::Message sets hdr_sz to 0
289 return clen
+ _reply
->hdr_sz
;
292 return -1; // not enough information to predict
298 assert(swapout
.sio
== NULL
);
299 data_hdr
.freeContent();
301 /* Should we check for clients? */
305 MemObject::lowestMemReaderOffset() const
307 LowestMemReader
lowest (endOffset() + 1);
309 for_each
<LowestMemReader
>(clients
, lowest
);
311 return lowest
.current
;
314 /* XXX: This is wrong. It breaks *badly* on range combining */
316 MemObject::readAheadPolicyCanRead() const
318 const bool canRead
= endOffset() - getReply()->hdr_sz
<
319 lowestMemReaderOffset() + Config
.readAheadGap
;
322 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz
<<
323 " < " << lowestMemReaderOffset() << '+' << Config
.readAheadGap
);
330 MemObject::addClient(store_client
*aClient
)
333 dlinkAdd(aClient
, &aClient
->node
, &clients
);
#if URL_CHECKSUM_DEBUG
/// debugging aid: verifies that the stored URL checksum still matches the URL
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
346 * How much of the object data is on the disk?
349 MemObject::objectBytesOnDisk() const
352 * NOTE: storeOffset() represents the disk file size,
353 * not the amount of object data on disk.
355 * If we don't have at least 'swap_hdr_sz' bytes
356 * then none of the object data is on disk.
358 * This should still be safe if swap_hdr_sz == 0,
359 * meaning we haven't even opened the swapout file
363 if (swapout
.sio
.getRaw() == NULL
)
366 int64_t nwritten
= swapout
.sio
->offset();
368 if (nwritten
<= (int64_t)swap_hdr_sz
)
371 return (nwritten
- swap_hdr_sz
);
375 MemObject::policyLowestOffsetToKeep(bool swap
) const
378 * Careful. lowest_offset can be greater than endOffset(), such
379 * as in the case of a range request.
381 int64_t lowest_offset
= lowestMemReaderOffset();
383 if (endOffset() < lowest_offset
||
384 endOffset() - inmem_lo
> (int64_t)Config
.Store
.maxInMemObjSize
||
385 (swap
&& !Config
.onoff
.memory_cache_first
))
386 return lowest_offset
;
392 MemObject::trimSwappable()
394 int64_t new_mem_lo
= policyLowestOffsetToKeep(1);
396 * We should only free up to what we know has been written
397 * to disk, not what has been queued for writing. Otherwise
398 * there will be a chunk of the data which is not in memory
399 * and is not yet on disk.
400 * The -1 makes sure the page isn't freed until storeSwapOut has
401 * walked to the next page.
405 if ((on_disk
= objectBytesOnDisk()) - 1 < new_mem_lo
)
406 new_mem_lo
= on_disk
- 1;
408 if (new_mem_lo
== -1)
409 new_mem_lo
= 0; /* the above might become -1 */
411 data_hdr
.freeDataUpto(new_mem_lo
);
413 inmem_lo
= new_mem_lo
;
417 MemObject::trimUnSwappable()
419 if (const int64_t new_mem_lo
= policyLowestOffsetToKeep(false)) {
420 assert (new_mem_lo
> 0);
421 data_hdr
.freeDataUpto(new_mem_lo
);
422 inmem_lo
= new_mem_lo
;
423 } // else we should not trim anything at this time
427 MemObject::isContiguous() const
429 bool result
= data_hdr
.hasContigousContentRange (Range
<int64_t>(inmem_lo
, endOffset()));
430 /* XXX : make this higher level */
431 debugs (19, result
? 4 :3, "MemObject::isContiguous: Returning " << (result
? "true" : "false"));
436 MemObject::mostBytesWanted(int max
, bool ignoreDelayPools
) const
439 if (!ignoreDelayPools
) {
440 /* identify delay id with largest allowance */
441 DelayId largestAllowance
= mostBytesAllowed ();
442 return largestAllowance
.bytesWanted(0, max
);
450 MemObject::setNoDelay(bool const newValue
)
454 for (dlink_node
*node
= clients
.head
; node
; node
= node
->next
) {
455 store_client
*sc
= (store_client
*) node
->data
;
456 sc
->delayId
.setNoDelay(newValue
);
463 MemObject::delayRead(DeferredRead
const &aRead
)
465 deferredReads
.delayRead(aRead
);
469 MemObject::kickReads()
471 deferredReads
.kickReads(-1);
476 MemObject::mostBytesAllowed() const
482 for (dlink_node
*node
= clients
.head
; node
; node
= node
->next
) {
483 store_client
*sc
= (store_client
*) node
->data
;
485 /* This test is invalid because the client may be writing data
486 * and thus will want data immediately.
487 * If we include the test, there is a race condition when too much
488 * data is read - if all sc's are writing when a read is scheduled.
492 if (!sc
->callbackPending())
493 /* not waiting for more data */
498 j
= sc
->delayId
.bytesWanted(0, sc
->copyInto
.length
);
502 result
= sc
->delayId
;
512 MemObject::availableForSwapOut() const
514 return endOffset() - swapout
.queue_offset
;