/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */
#include "squid.h"
#include "comm/Connection.h"
#include "DelayPools.h"
#include "HttpReply.h"
#include "MemObject.h"
#include "SquidConfig.h"
#include "StoreClient.h"
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Computes a 32-bit checksum of the given URL string from the leading
/// bytes of its MD5 digest. Debug-only sanity aid for URL fields.
// NOTE(review): the M/ck declarations, SquidMD5Init() call, and the return
// statement were lost in the damaged source text and were reconstructed --
// verify against upstream MemObject.cc.
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}
#endif
44 RemovalPolicy
* mem_policy
= NULL
;
47 MemObject::inUseCount()
49 return Pool().inUseCount();
53 MemObject::storeId() const
55 if (!storeId_
.size()) {
56 debugs(20, DBG_IMPORTANT
, "ERROR: Squid BUG: Missing MemObject::storeId value");
58 storeId_
= "[unknown_URI]";
60 return storeId_
.termedBuf();
64 MemObject::logUri() const
66 return logUri_
.size() ? logUri_
.termedBuf() : storeId();
70 MemObject::hasUris() const
72 return storeId_
.size();
76 MemObject::setUris(char const *aStoreId
, char const *aLogUri
, const HttpRequestMethod
&aMethod
)
82 debugs(88, 3, this << " storeId: " << storeId_
);
84 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
85 if (!aLogUri
|| aLogUri
== aStoreId
)
86 logUri_
.clean(); // use storeId_ by default to minimize copying
92 #if URL_CHECKSUM_DEBUG
93 chksum
= url_checksum(urlXXX());
97 MemObject::MemObject()
99 debugs(20, 3, "MemObject constructed, this=" << this);
100 ping_reply_callback
= nullptr;
101 memset(&start_ping
, 0, sizeof(start_ping
));
102 reply_
= new HttpReply
;
105 MemObject::~MemObject()
107 debugs(20, 3, "MemObject destructed, this=" << this);
109 #if URL_CHECKSUM_DEBUG
113 if (!shutting_down
) { // Store::Root() is FATALly missing during shutdown
114 assert(xitTable
.index
< 0);
115 assert(memCache
.index
< 0);
116 assert(swapout
.sio
== NULL
);
119 data_hdr
.freeContent();
123 MemObject::adjustableBaseReply()
125 assert(!updatedReply_
);
130 MemObject::replaceBaseReply(const HttpReplyPointer
&r
)
134 updatedReply_
= nullptr;
138 MemObject::write(const StoreIOBuffer
&writeBuffer
)
140 debugs(19, 6, "memWrite: offset " << writeBuffer
.offset
<< " len " << writeBuffer
.length
);
142 /* We don't separate out mime headers yet, so ensure that the first
143 * write is at offset 0 - where they start
145 assert (data_hdr
.endOffset() || writeBuffer
.offset
== 0);
147 assert (data_hdr
.write (writeBuffer
));
151 MemObject::dump() const
155 debugs(20, DBG_IMPORTANT
, "MemObject->start_ping: " << start_ping
);
156 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_hi: " << data_hdr
.endOffset());
157 debugs(20, DBG_IMPORTANT
, "MemObject->inmem_lo: " << inmem_lo
);
158 debugs(20, DBG_IMPORTANT
, "MemObject->nclients: " << nclients
);
159 debugs(20, DBG_IMPORTANT
, "MemObject->reply: " << reply_
);
160 debugs(20, DBG_IMPORTANT
, "MemObject->updatedReply: " << updatedReply_
);
161 debugs(20, DBG_IMPORTANT
, "MemObject->appliedUpdates: " << appliedUpdates
);
162 debugs(20, DBG_IMPORTANT
, "MemObject->request: " << request
);
163 debugs(20, DBG_IMPORTANT
, "MemObject->logUri: " << logUri_
);
164 debugs(20, DBG_IMPORTANT
, "MemObject->storeId: " << storeId_
);
167 struct LowestMemReader
: public unary_function
<store_client
, void> {
168 LowestMemReader(int64_t seed
):current(seed
) {}
170 void operator() (store_client
const &x
) {
171 if (x
.memReaderHasLowerOffset(current
))
172 current
= x
.copyInto
.offset
;
178 struct StoreClientStats
: public unary_function
<store_client
, void> {
179 StoreClientStats(MemBuf
*anEntry
):where(anEntry
),index(0) {}
181 void operator()(store_client
const &x
) {
182 x
.dumpStats(where
, index
);
191 MemObject::stat(MemBuf
* mb
) const
193 mb
->appendf("\t" SQUIDSBUFPH
" %s\n", SQUIDSBUFPRINT(method
.image()), logUri());
194 if (!vary_headers
.isEmpty())
195 mb
->appendf("\tvary_headers: " SQUIDSBUFPH
"\n", SQUIDSBUFPRINT(vary_headers
));
196 mb
->appendf("\tinmem_lo: %" PRId64
"\n", inmem_lo
);
197 mb
->appendf("\tinmem_hi: %" PRId64
"\n", data_hdr
.endOffset());
198 mb
->appendf("\tswapout: %" PRId64
" bytes queued\n", swapout
.queue_offset
);
200 if (swapout
.sio
.getRaw())
201 mb
->appendf("\tswapout: %" PRId64
" bytes written\n", (int64_t) swapout
.sio
->offset());
203 if (xitTable
.index
>= 0)
204 mb
->appendf("\ttransient index: %d state: %d\n", xitTable
.index
, xitTable
.io
);
205 if (memCache
.index
>= 0)
206 mb
->appendf("\tmem-cache index: %d state: %d offset: %" PRId64
"\n", memCache
.index
, memCache
.io
, memCache
.offset
);
208 mb
->appendf("\tobject_sz: %" PRId64
"\n", object_sz
);
210 StoreClientStats
statsVisitor(mb
);
212 for_each
<StoreClientStats
>(clients
, statsVisitor
);
216 MemObject::endOffset () const
218 return data_hdr
.endOffset();
222 MemObject::markEndOfReplyHeaders()
224 const int hdr_sz
= endOffset();
227 reply_
->hdr_sz
= hdr_sz
;
231 MemObject::size() const
240 MemObject::expectedReplySize() const
242 if (object_sz
>= 0) {
243 debugs(20, 7, object_sz
<< " frozen by complete()");
247 const auto hdr_sz
= baseReply().hdr_sz
;
249 // Cannot predict future length using an empty/unset or HTTP/0 reply.
250 // For any HTTP/1 reply, hdr_sz is positive -- status-line cannot be empty.
254 const auto clen
= baseReply().bodySize(method
);
256 debugs(20, 7, "unknown; hdr: " << hdr_sz
);
260 const auto messageSize
= clen
+ hdr_sz
;
261 debugs(20, 7, messageSize
<< " hdr: " << hdr_sz
<< " clen: " << clen
);
268 assert(swapout
.sio
== NULL
);
269 data_hdr
.freeContent();
271 /* Should we check for clients? */
274 updatedReply_
= nullptr;
275 appliedUpdates
= false;
279 MemObject::lowestMemReaderOffset() const
281 LowestMemReader
lowest (endOffset() + 1);
283 for_each
<LowestMemReader
>(clients
, lowest
);
285 return lowest
.current
;
288 /* XXX: This is wrong. It breaks *badly* on range combining */
290 MemObject::readAheadPolicyCanRead() const
292 const auto savedHttpHeaders
= baseReply().hdr_sz
;
293 const bool canRead
= endOffset() - savedHttpHeaders
<
294 lowestMemReaderOffset() + Config
.readAheadGap
;
297 debugs(19, 5, "no: " << endOffset() << '-' << savedHttpHeaders
<<
298 " < " << lowestMemReaderOffset() << '+' << Config
.readAheadGap
);
305 MemObject::addClient(store_client
*aClient
)
308 dlinkAdd(aClient
, &aClient
->node
, &clients
);
#if URL_CHECKSUM_DEBUG
/// Debug-only: validates that the stored checksum still matches the URL.
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}
#endif
321 * How much of the object data is on the disk?
324 MemObject::objectBytesOnDisk() const
327 * NOTE: storeOffset() represents the disk file size,
328 * not the amount of object data on disk.
330 * If we don't have at least 'swap_hdr_sz' bytes
331 * then none of the object data is on disk.
333 * This should still be safe if swap_hdr_sz == 0,
334 * meaning we haven't even opened the swapout file
338 if (swapout
.sio
.getRaw() == NULL
)
341 int64_t nwritten
= swapout
.sio
->offset();
343 if (nwritten
<= (int64_t)swap_hdr_sz
)
346 return (nwritten
- swap_hdr_sz
);
350 MemObject::policyLowestOffsetToKeep(bool swap
) const
353 * Careful. lowest_offset can be greater than endOffset(), such
354 * as in the case of a range request.
356 int64_t lowest_offset
= lowestMemReaderOffset();
358 if (endOffset() < lowest_offset
||
359 endOffset() - inmem_lo
> (int64_t)Config
.Store
.maxInMemObjSize
||
360 (swap
&& !Config
.onoff
.memory_cache_first
))
361 return lowest_offset
;
367 MemObject::trimSwappable()
369 int64_t new_mem_lo
= policyLowestOffsetToKeep(1);
371 * We should only free up to what we know has been written
372 * to disk, not what has been queued for writing. Otherwise
373 * there will be a chunk of the data which is not in memory
374 * and is not yet on disk.
375 * The -1 makes sure the page isn't freed until storeSwapOut has
376 * walked to the next page.
380 if ((on_disk
= objectBytesOnDisk()) - 1 < new_mem_lo
)
381 new_mem_lo
= on_disk
- 1;
383 if (new_mem_lo
== -1)
384 new_mem_lo
= 0; /* the above might become -1 */
386 data_hdr
.freeDataUpto(new_mem_lo
);
388 inmem_lo
= new_mem_lo
;
392 MemObject::trimUnSwappable()
394 if (const int64_t new_mem_lo
= policyLowestOffsetToKeep(false)) {
395 assert (new_mem_lo
> 0);
396 data_hdr
.freeDataUpto(new_mem_lo
);
397 inmem_lo
= new_mem_lo
;
398 } // else we should not trim anything at this time
402 MemObject::isContiguous() const
404 bool result
= data_hdr
.hasContigousContentRange (Range
<int64_t>(inmem_lo
, endOffset()));
405 /* XXX : make this higher level */
406 debugs (19, result
? 4 :3, "MemObject::isContiguous: Returning " << (result
? "true" : "false"));
411 MemObject::mostBytesWanted(int max
, bool ignoreDelayPools
) const
414 if (!ignoreDelayPools
) {
415 /* identify delay id with largest allowance */
416 DelayId largestAllowance
= mostBytesAllowed ();
417 return largestAllowance
.bytesWanted(0, max
);
420 (void)ignoreDelayPools
;
427 MemObject::setNoDelay(bool const newValue
)
431 for (dlink_node
*node
= clients
.head
; node
; node
= node
->next
) {
432 store_client
*sc
= (store_client
*) node
->data
;
433 sc
->delayId
.setNoDelay(newValue
);
441 MemObject::delayRead(const AsyncCall::Pointer
&aRead
)
444 if (readAheadPolicyCanRead()) {
445 if (DelayId mostAllowedId
= mostBytesAllowed()) {
446 mostAllowedId
.delayRead(aRead
);
451 deferredReads
.delay(aRead
);
455 MemObject::kickReads()
457 deferredReads
.schedule();
#if USE_DELAY_POOLS
/// The DelayId of the attached client currently granted the largest byte
/// allowance by its delay pool.
// NOTE(review): the guard, return type, local declarations (j/jmax/result)
// and the jmax comparison were lost in the damaged source; reconstructed
// from the visible loop body -- verify against upstream MemObject.cc.
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}
#endif
485 MemObject::availableForSwapOut() const
487 return endOffset() - swapout
.queue_offset
;