]> git.ipfire.org Git - thirdparty/squid.git/blame - src/MemObject.cc
Source Format Enforcement (#532)
[thirdparty/squid.git] / src / MemObject.cc
CommitLineData
528b2c61 1/*
77b1029d 2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
528b2c61 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
528b2c61 7 */
8
bbc27441
AJ
9/* DEBUG: section 19 Store Memory Primitives */
10
582c2af2 11#include "squid.h"
3e4bebf8 12#include "comm/Connection.h"
582c2af2 13#include "Generic.h"
af69c635 14#include "globals.h"
528b2c61 15#include "HttpReply.h"
582c2af2
FC
16#include "MemBuf.h"
17#include "MemObject.h"
18#include "profiler/Profiler.h"
4d5904f7 19#include "SquidConfig.h"
528b2c61 20#include "Store.h"
21#include "StoreClient.h"
582c2af2 22
9a0a18de 23#if USE_DELAY_POOLS
b67e2c8c 24#include "DelayPools.h"
25#endif
528b2c61 26
27/* TODO: make this global or private */
28#if URL_CHECKSUM_DEBUG
29static unsigned int url_checksum(const char *url);
30unsigned int
31url_checksum(const char *url)
32{
33 unsigned int ck;
c3031d67 34 SquidMD5_CTX M;
528b2c61 35 static unsigned char digest[16];
c3031d67 36 SquidMD5Init(&M);
37 SquidMD5Update(&M, (unsigned char *) url, strlen(url));
38 SquidMD5Final(digest, &M);
41d00cd3 39 memcpy(&ck, digest, sizeof(ck));
528b2c61 40 return ck;
41}
62e76326 42
528b2c61 43#endif
44
aa839030 45RemovalPolicy * mem_policy = NULL;
46
528b2c61 47size_t
48MemObject::inUseCount()
49{
9f9e06f3 50 return Pool().inUseCount();
528b2c61 51}
52
c877c0bc 53const char *
9d4e9cfb
AR
54MemObject::storeId() const
55{
dcd84f80 56 if (!storeId_.size()) {
c877c0bc
AR
57 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
58 dump();
59 storeId_ = "[unknown_URI]";
60 }
61 return storeId_.termedBuf();
9487bae9
AR
62}
63
c877c0bc 64const char *
9d4e9cfb
AR
65MemObject::logUri() const
66{
dcd84f80 67 return logUri_.size() ? logUri_.termedBuf() : storeId();
c877c0bc 68}
4a56ee8d 69
c877c0bc 70bool
9d4e9cfb
AR
71MemObject::hasUris() const
72{
dcd84f80 73 return storeId_.size();
c877c0bc 74}
4a56ee8d 75
/// Records the Store ID, the logging URI, and the request method.
/// Only the first call has any effect: identifiers stay stable afterwards.
/// \param aStoreId the Store key for this object
/// \param aLogUri the URI to log, or nil/same-pointer to reuse aStoreId
/// \param aMethod the HTTP request method associated with this object
void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    if (hasUris())
        return;

    storeId_ = aStoreId;
    debugs(88, 3, this << " storeId: " << storeId_);

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}
62e76326 97
b56b37cf 98MemObject::MemObject()
c877c0bc 99{
b56b37cf
AJ
100 debugs(20, 3, "MemObject constructed, this=" << this);
101 ping_reply_callback = nullptr;
cc8c4af2 102 memset(&start_ping, 0, sizeof(start_ping));
a0c227a9 103 reply_ = new HttpReply;
528b2c61 104}
105
/// Destroys the MemObject, releasing buffered content. Outside of shutdown,
/// verifies the object has already been detached from transient/memory
/// caches and has no pending swapout.
MemObject::~MemObject()
{
    debugs(20, 3, "MemObject destructed, this=" << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    ctx_exit(ctx); /* must exit before we free mem->url */
}
134
/// \returns a writable reference to the base reply; only legal before any
/// updated reply has been produced (enforced by the assertion)
HttpReply &
MemObject::adjustableBaseReply()
{
    assert(!updatedReply_);
    return *reply_;
}
141
/// Replaces the base reply with the given (non-nil) reply and discards any
/// previously derived updated reply.
void
MemObject::replaceBaseReply(const HttpReplyPointer &r)
{
    assert(r);
    reply_ = r;
    updatedReply_ = nullptr;
}
149
/// Appends the given buffer to the in-memory data header.
/// \param writeBuffer the data and its absolute offset within the object
void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert (data_hdr.endOffset() || writeBuffer.offset == 0);

    // NOTE(review): the actual write happens inside assert(); Squid's assert
    // is always compiled in, but this would vanish under a plain NDEBUG build
    assert (data_hdr.write (writeBuffer));
    PROF_stop(MemObject_write);
}
164
/// Logs this object's state (buffer boundaries, client count, reply and
/// identifier fields) at DBG_IMPORTANT level, for debugging.
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
    debugs(20, DBG_IMPORTANT, "MemObject->updatedReply: " << updatedReply_);
    debugs(20, DBG_IMPORTANT, "MemObject->appliedUpdates: " << appliedUpdates);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
185
26ac0430
AJ
186struct LowestMemReader : public unary_function<store_client, void> {
187 LowestMemReader(int64_t seed):current(seed) {}
62e76326 188
26ac0430 189 void operator() (store_client const &x) {
62e76326 190 if (x.memReaderHasLowerOffset(current))
191 current = x.copyInto.offset;
192 }
193
47f6e231 194 int64_t current;
528b2c61 195};
196
26ac0430
AJ
197struct StoreClientStats : public unary_function<store_client, void> {
198 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
62e76326 199
26ac0430 200 void operator()(store_client const &x) {
aec55359
FC
201 x.dumpStats(where, index);
202 ++index;
528b2c61 203 }
62e76326 204
fcc35180 205 MemBuf *where;
528b2c61 206 size_t index;
207};
208
/// Appends a human-readable summary of this object (method and URI,
/// in-memory boundaries, swapout progress, and per-client stats) to mb.
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    // report only indexes that are set (non-negative)
    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
233
47f6e231 234int64_t
528b2c61 235MemObject::endOffset () const
236{
237 return data_hdr.endOffset();
238}
239
3756e5c0
AR
240void
241MemObject::markEndOfReplyHeaders()
242{
243 const int hdr_sz = endOffset();
244 assert(hdr_sz >= 0);
a0c227a9
AJ
245 assert(reply_);
246 reply_->hdr_sz = hdr_sz;
3756e5c0
AR
247}
248
47f6e231 249int64_t
528b2c61 250MemObject::size() const
251{
62e76326 252 if (object_sz < 0)
253 return endOffset();
254
528b2c61 255 return object_sz;
256}
257
/// \returns the anticipated whole-message size (headers plus body),
/// or -1 when it cannot be determined yet
int64_t
MemObject::expectedReplySize() const
{
    if (object_sz >= 0) {
        debugs(20, 7, object_sz << " frozen by complete()");
        return object_sz; // the object is complete; its size is final
    }

    const auto hdr_sz = baseReply().hdr_sz;

    // Cannot predict future length using an empty/unset or HTTP/0 reply.
    // For any HTTP/1 reply, hdr_sz is positive -- status-line cannot be empty.
    if (hdr_sz <= 0)
        return -1;

    const auto clen = baseReply().bodySize(method);
    if (clen < 0) {
        debugs(20, 7, "unknown; hdr: " << hdr_sz);
        return -1;
    }

    const auto messageSize = clen + hdr_sz;
    debugs(20, 7, messageSize << " hdr: " << hdr_sz << " clen: " << clen);
    return messageSize;
}
283
/// Discards all buffered content and reply state, returning the object to
/// a just-constructed-like condition. Must not be called mid-swapout.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
    assert(reply_);
    reply_->reset();
    updatedReply_ = nullptr;
    appliedUpdates = false;
}
296
47f6e231 297int64_t
528b2c61 298MemObject::lowestMemReaderOffset() const
299{
300 LowestMemReader lowest (endOffset() + 1);
301
4cbb7fa8 302 for_each <LowestMemReader>(clients, lowest);
62e76326 303
528b2c61 304 return lowest.current;
305}
306
/* XXX: This is wrong. It breaks *badly* on range combining */
/// Whether reading more data is permitted: buffered body bytes must not run
/// more than Config.readAheadGap ahead of the slowest attached reader.
bool
MemObject::readAheadPolicyCanRead() const
{
    const auto savedHttpHeaders = baseReply().hdr_sz;
    // body bytes buffered so far vs. the slowest reader's position plus gap
    const bool canRead = endOffset() - savedHttpHeaders <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 5, "no: " << endOffset() << '-' << savedHttpHeaders <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}
322
323void
324MemObject::addClient(store_client *aClient)
325{
326 ++nclients;
327 dlinkAdd(aClient, &aClient->node, &clients);
328}
329
#if URL_CHECKSUM_DEBUG
/// Asserts that the checksum recorded in setUris() still matches the
/// current URL, catching unexpected URL changes or corruption.
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
338
339/*
340 * How much of the object data is on the disk?
341 */
47f6e231 342int64_t
528b2c61 343MemObject::objectBytesOnDisk() const
344{
345 /*
346 * NOTE: storeOffset() represents the disk file size,
347 * not the amount of object data on disk.
26ac0430 348 *
528b2c61 349 * If we don't have at least 'swap_hdr_sz' bytes
350 * then none of the object data is on disk.
351 *
352 * This should still be safe if swap_hdr_sz == 0,
353 * meaning we haven't even opened the swapout file
354 * yet.
355 */
62e76326 356
528b2c61 357 if (swapout.sio.getRaw() == NULL)
62e76326 358 return 0;
359
47f6e231 360 int64_t nwritten = swapout.sio->offset();
62e76326 361
ed013b6c 362 if (nwritten <= (int64_t)swap_hdr_sz)
62e76326 363 return 0;
364
47f6e231 365 return (nwritten - swap_hdr_sz);
528b2c61 366}
367
/// Determines the lowest in-memory offset that must be preserved for readers.
/// \param swap whether the caller is trimming in preparation for swapout
/// \returns the offset below which buffered data may be freed
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    // keep only what the slowest reader needs when the buffered span exceeds
    // the memory-cache object limit, or when swapping out and the
    // memory_cache_first option is off
    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
384
385void
386MemObject::trimSwappable()
387{
10aeba1d 388 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
528b2c61 389 /*
390 * We should only free up to what we know has been written
391 * to disk, not what has been queued for writing. Otherwise
392 * there will be a chunk of the data which is not in memory
393 * and is not yet on disk.
394 * The -1 makes sure the page isn't freed until storeSwapOut has
aa1a691e 395 * walked to the next page.
528b2c61 396 */
47f6e231 397 int64_t on_disk;
62e76326 398
528b2c61 399 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
62e76326 400 new_mem_lo = on_disk - 1;
401
528b2c61 402 if (new_mem_lo == -1)
f53969cc 403 new_mem_lo = 0; /* the above might become -1 */
62e76326 404
528b2c61 405 data_hdr.freeDataUpto(new_mem_lo);
62e76326 406
528b2c61 407 inmem_lo = new_mem_lo;
408}
409
/// Frees buffered data below the policy-determined offset for an object
/// that is not being swapped out; a zero policy offset means keep everything.
void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert (new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}
419
528b2c61 420bool
421MemObject::isContiguous() const
422{
47f6e231 423 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
528b2c61 424 /* XXX : make this higher level */
bf8fe701 425 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
528b2c61 426 return result;
427}
b67e2c8c 428
/// \returns how many bytes may be read now, never exceeding max
/// \param ignoreDelayPools when true, delay pool limits are bypassed
int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed ();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}
442
a46d2c0e 443void
444MemObject::setNoDelay(bool const newValue)
445{
9a0a18de 446#if USE_DELAY_POOLS
a46d2c0e 447
448 for (dlink_node *node = clients.head; node; node = node->next) {
449 store_client *sc = (store_client *) node->data;
450 sc->delayId.setNoDelay(newValue);
451 }
452
453#endif
454}
455
/// Postpones the given read. With delay pools enabled and read-ahead
/// permitted, the delay ID with the largest allowance takes over the read;
/// otherwise the read waits in deferredReads until kickReads().
void
MemObject::delayRead(DeferredRead const &aRead)
{
#if USE_DELAY_POOLS
    if (readAheadPolicyCanRead()) {
        if (DelayId mostAllowedId = mostBytesAllowed()) {
            mostAllowedId.delayRead(aRead);
            return;
        }
    }
#endif
    deferredReads.delayRead(aRead);
}
469
470void
471MemObject::kickReads()
472{
473 deferredReads.kickReads(-1);
474}
475
#if USE_DELAY_POOLS
/// \returns the delay ID, among all attached clients, that currently
/// permits the most bytes to be read
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        const int allowance = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (allowance > jmax) {
            jmax = allowance;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
5b55f1f1
CT
512
513int64_t
514MemObject::availableForSwapOut() const
515{
516 return endOffset() - swapout.queue_offset;
517}
f53969cc 518