]> git.ipfire.org Git - thirdparty/squid.git/blame - src/MemObject.cc
Docs: Copyright updates for 2018 (#114)
[thirdparty/squid.git] / src / MemObject.cc
CommitLineData
528b2c61 1/*
5b74111a 2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
528b2c61 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
528b2c61 7 */
8
bbc27441
AJ
9/* DEBUG: section 19 Store Memory Primitives */
10
582c2af2 11#include "squid.h"
3e4bebf8 12#include "comm/Connection.h"
582c2af2 13#include "Generic.h"
af69c635 14#include "globals.h"
528b2c61 15#include "HttpReply.h"
582c2af2
FC
16#include "MemBuf.h"
17#include "MemObject.h"
18#include "profiler/Profiler.h"
4d5904f7 19#include "SquidConfig.h"
528b2c61 20#include "Store.h"
21#include "StoreClient.h"
582c2af2 22
9a0a18de 23#if USE_DELAY_POOLS
b67e2c8c 24#include "DelayPools.h"
25#endif
528b2c61 26
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Computes a 32-bit checksum of a URL by taking the leading bytes of
/// its MD5 digest. Used only in debug builds to detect URL corruption.
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX md5State;
    static unsigned char digest[16];
    SquidMD5Init(&md5State);
    SquidMD5Update(&md5State, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &md5State);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
44
aa839030 45RemovalPolicy * mem_policy = NULL;
46
528b2c61 47size_t
48MemObject::inUseCount()
49{
9f9e06f3 50 return Pool().inUseCount();
528b2c61 51}
52
c877c0bc 53const char *
9d4e9cfb
AR
54MemObject::storeId() const
55{
dcd84f80 56 if (!storeId_.size()) {
c877c0bc
AR
57 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
58 dump();
59 storeId_ = "[unknown_URI]";
60 }
61 return storeId_.termedBuf();
9487bae9
AR
62}
63
c877c0bc 64const char *
9d4e9cfb
AR
65MemObject::logUri() const
66{
dcd84f80 67 return logUri_.size() ? logUri_.termedBuf() : storeId();
c877c0bc 68}
4a56ee8d 69
c877c0bc 70bool
9d4e9cfb
AR
71MemObject::hasUris() const
72{
dcd84f80 73 return storeId_.size();
c877c0bc 74}
4a56ee8d 75
c877c0bc
AR
76void
77MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
78{
76d61119
EB
79 if (hasUris())
80 return;
81
c877c0bc 82 storeId_ = aStoreId;
76d61119 83 debugs(88, 3, this << " storeId: " << storeId_);
c877c0bc
AR
84
85 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
9d4e9cfb 86 if (!aLogUri || aLogUri == aStoreId)
c877c0bc
AR
87 logUri_.clean(); // use storeId_ by default to minimize copying
88 else
89 logUri_ = aLogUri;
62e76326 90
c877c0bc 91 method = aMethod;
4a56ee8d 92
c877c0bc
AR
93#if URL_CHECKSUM_DEBUG
94 chksum = url_checksum(urlXXX());
528b2c61 95#endif
c877c0bc 96}
62e76326 97
cc8c4af2
AJ
98MemObject::MemObject() :
99 inmem_lo(0),
100 nclients(0),
101 smpCollapsed(false),
d59e4742
FC
102 ping_reply_callback(nullptr),
103 ircb_data(nullptr),
cc8c4af2
AJ
104 id(0),
105 object_sz(-1),
106 swap_hdr_sz(0),
107#if URL_CHECKSUM_DEBUG
108 chksum(0),
109#endif
d59e4742 110 vary_headers(nullptr)
c877c0bc 111{
cc8c4af2
AJ
112 debugs(20, 3, "new MemObject " << this);
113 memset(&start_ping, 0, sizeof(start_ping));
114 memset(&abort, 0, sizeof(abort));
a0c227a9 115 reply_ = new HttpReply;
528b2c61 116}
117
118MemObject::~MemObject()
119{
cc8c4af2 120 debugs(20, 3, "del MemObject " << this);
0df20c61 121 const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");
62e76326 122
c877c0bc
AR
123#if URL_CHECKSUM_DEBUG
124 checkUrlChecksum();
528b2c61 125#endif
62e76326 126
4475555f 127 if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
4475555f
AR
128 assert(xitTable.index < 0);
129 assert(memCache.index < 0);
528b2c61 130 assert(swapout.sio == NULL);
4475555f 131 }
62e76326 132
528b2c61 133 data_hdr.freeContent();
62e76326 134
9cdee68d 135#if 0
528b2c61 136 /*
137 * There is no way to abort FD-less clients, so they might
9cdee68d 138 * still have mem->clients set.
528b2c61 139 */
9cdee68d 140 assert(clients.head == NULL);
141
142#endif
62e76326 143
528b2c61 144 ctx_exit(ctx); /* must exit before we free mem->url */
528b2c61 145}
146
528b2c61 147void
55759ffb 148MemObject::write(const StoreIOBuffer &writeBuffer)
528b2c61 149{
1d5161bd 150 PROF_start(MemObject_write);
4a7a3d56 151 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
528b2c61 152
528b2c61 153 /* We don't separate out mime headers yet, so ensure that the first
26ac0430 154 * write is at offset 0 - where they start
528b2c61 155 */
156 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
157
158 assert (data_hdr.write (writeBuffer));
1d5161bd 159 PROF_stop(MemObject_write);
528b2c61 160}
161
162void
163MemObject::dump() const
164{
42a503bd 165 data_hdr.dump();
528b2c61 166#if 0
167 /* do we want this one? */
e0236918 168 debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
528b2c61 169#endif
62e76326 170
e0236918
FC
171 debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
172 debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
173 debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
174 debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
a0c227a9 175 debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
e0236918 176 debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
c877c0bc
AR
177 debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
178 debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
528b2c61 179}
180
26ac0430
AJ
181struct LowestMemReader : public unary_function<store_client, void> {
182 LowestMemReader(int64_t seed):current(seed) {}
62e76326 183
26ac0430 184 void operator() (store_client const &x) {
62e76326 185 if (x.memReaderHasLowerOffset(current))
186 current = x.copyInto.offset;
187 }
188
47f6e231 189 int64_t current;
528b2c61 190};
191
26ac0430
AJ
192struct StoreClientStats : public unary_function<store_client, void> {
193 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
62e76326 194
26ac0430 195 void operator()(store_client const &x) {
aec55359
FC
196 x.dumpStats(where, index);
197 ++index;
528b2c61 198 }
62e76326 199
fcc35180 200 MemBuf *where;
528b2c61 201 size_t index;
202};
203
204void
83af6fa2 205MemObject::stat(MemBuf * mb) const
528b2c61 206{
4391cd15 207 mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
90ab8f20
AJ
208 if (!vary_headers.isEmpty())
209 mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
4391cd15
AJ
210 mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
211 mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
212 mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);
62e76326 213
528b2c61 214 if (swapout.sio.getRaw())
4391cd15 215 mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());
62e76326 216
752fd8d2 217 if (xitTable.index >= 0)
4391cd15 218 mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
752fd8d2 219 if (memCache.index >= 0)
4391cd15 220 mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
752fd8d2 221 if (object_sz >= 0)
4391cd15 222 mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
752fd8d2 223 if (smpCollapsed)
4391cd15 224 mb->appendf("\tsmp-collapsed\n");
752fd8d2 225
fcc35180 226 StoreClientStats statsVisitor(mb);
62e76326 227
4cbb7fa8 228 for_each<StoreClientStats>(clients, statsVisitor);
528b2c61 229}
230
47f6e231 231int64_t
528b2c61 232MemObject::endOffset () const
233{
234 return data_hdr.endOffset();
235}
236
3756e5c0
AR
237void
238MemObject::markEndOfReplyHeaders()
239{
240 const int hdr_sz = endOffset();
241 assert(hdr_sz >= 0);
a0c227a9
AJ
242 assert(reply_);
243 reply_->hdr_sz = hdr_sz;
3756e5c0
AR
244}
245
47f6e231 246int64_t
528b2c61 247MemObject::size() const
248{
62e76326 249 if (object_sz < 0)
250 return endOffset();
251
528b2c61 252 return object_sz;
253}
254
aa1a691e 255int64_t
9199139f
AR
256MemObject::expectedReplySize() const
257{
a0c227a9 258 debugs(20, 7, "object_sz: " << object_sz);
aa1a691e
AR
259 if (object_sz >= 0) // complete() has been called; we know the exact answer
260 return object_sz;
261
a0c227a9
AJ
262 if (reply_) {
263 const int64_t clen = reply_->bodySize(method);
264 debugs(20, 7, "clen: " << clen);
265 if (clen >= 0 && reply_->hdr_sz > 0) // yuck: Http::Message sets hdr_sz to 0
266 return clen + reply_->hdr_sz;
aa1a691e
AR
267 }
268
269 return -1; // not enough information to predict
270}
271
528b2c61 272void
273MemObject::reset()
274{
275 assert(swapout.sio == NULL);
276 data_hdr.freeContent();
277 inmem_lo = 0;
278 /* Should we check for clients? */
a0c227a9
AJ
279 if (reply_)
280 reply_->reset();
528b2c61 281}
282
47f6e231 283int64_t
528b2c61 284MemObject::lowestMemReaderOffset() const
285{
286 LowestMemReader lowest (endOffset() + 1);
287
4cbb7fa8 288 for_each <LowestMemReader>(clients, lowest);
62e76326 289
528b2c61 290 return lowest.current;
291}
292
293/* XXX: This is wrong. It breaks *badly* on range combining */
294bool
295MemObject::readAheadPolicyCanRead() const
296{
f54986ad 297 const bool canRead = endOffset() - getReply()->hdr_sz <
9d4e9cfb 298 lowestMemReaderOffset() + Config.readAheadGap;
f54986ad
AR
299
300 if (!canRead) {
301 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
302 " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
303 }
304
305 return canRead;
528b2c61 306}
307
308void
309MemObject::addClient(store_client *aClient)
310{
311 ++nclients;
312 dlinkAdd(aClient, &aClient->node, &clients);
313}
314
#if URL_CHECKSUM_DEBUG
/// Debug-build sanity check: asserts the stored URL checksum still
/// matches a freshly computed one (detects URL corruption).
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
323
324/*
325 * How much of the object data is on the disk?
326 */
47f6e231 327int64_t
528b2c61 328MemObject::objectBytesOnDisk() const
329{
330 /*
331 * NOTE: storeOffset() represents the disk file size,
332 * not the amount of object data on disk.
26ac0430 333 *
528b2c61 334 * If we don't have at least 'swap_hdr_sz' bytes
335 * then none of the object data is on disk.
336 *
337 * This should still be safe if swap_hdr_sz == 0,
338 * meaning we haven't even opened the swapout file
339 * yet.
340 */
62e76326 341
528b2c61 342 if (swapout.sio.getRaw() == NULL)
62e76326 343 return 0;
344
47f6e231 345 int64_t nwritten = swapout.sio->offset();
62e76326 346
ed013b6c 347 if (nwritten <= (int64_t)swap_hdr_sz)
62e76326 348 return 0;
349
47f6e231 350 return (nwritten - swap_hdr_sz);
528b2c61 351}
352
47f6e231 353int64_t
10aeba1d 354MemObject::policyLowestOffsetToKeep(bool swap) const
528b2c61 355{
356 /*
357 * Careful. lowest_offset can be greater than endOffset(), such
358 * as in the case of a range request.
359 */
47f6e231 360 int64_t lowest_offset = lowestMemReaderOffset();
62e76326 361
528b2c61 362 if (endOffset() < lowest_offset ||
ff4b33f4 363 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
10aeba1d 364 (swap && !Config.onoff.memory_cache_first))
62e76326 365 return lowest_offset;
366
528b2c61 367 return inmem_lo;
368}
369
370void
371MemObject::trimSwappable()
372{
10aeba1d 373 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
528b2c61 374 /*
375 * We should only free up to what we know has been written
376 * to disk, not what has been queued for writing. Otherwise
377 * there will be a chunk of the data which is not in memory
378 * and is not yet on disk.
379 * The -1 makes sure the page isn't freed until storeSwapOut has
aa1a691e 380 * walked to the next page.
528b2c61 381 */
47f6e231 382 int64_t on_disk;
62e76326 383
528b2c61 384 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
62e76326 385 new_mem_lo = on_disk - 1;
386
528b2c61 387 if (new_mem_lo == -1)
f53969cc 388 new_mem_lo = 0; /* the above might become -1 */
62e76326 389
528b2c61 390 data_hdr.freeDataUpto(new_mem_lo);
62e76326 391
528b2c61 392 inmem_lo = new_mem_lo;
393}
394
395void
396MemObject::trimUnSwappable()
397{
99921d9d
AR
398 if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
399 assert (new_mem_lo > 0);
400 data_hdr.freeDataUpto(new_mem_lo);
401 inmem_lo = new_mem_lo;
402 } // else we should not trim anything at this time
528b2c61 403}
404
528b2c61 405bool
406MemObject::isContiguous() const
407{
47f6e231 408 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
528b2c61 409 /* XXX : make this higher level */
bf8fe701 410 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
528b2c61 411 return result;
412}
b67e2c8c 413
414int
384a7590 415MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
b67e2c8c 416{
9a0a18de 417#if USE_DELAY_POOLS
384a7590
JP
418 if (!ignoreDelayPools) {
419 /* identify delay id with largest allowance */
420 DelayId largestAllowance = mostBytesAllowed ();
421 return largestAllowance.bytesWanted(0, max);
422 }
423#endif
62e76326 424
b67e2c8c 425 return max;
b67e2c8c 426}
427
a46d2c0e 428void
429MemObject::setNoDelay(bool const newValue)
430{
9a0a18de 431#if USE_DELAY_POOLS
a46d2c0e 432
433 for (dlink_node *node = clients.head; node; node = node->next) {
434 store_client *sc = (store_client *) node->data;
435 sc->delayId.setNoDelay(newValue);
436 }
437
438#endif
439}
440
441void
442MemObject::delayRead(DeferredRead const &aRead)
443{
f1ba1fba 444#if USE_DELAY_POOLS
e71eae24
SM
445 if (readAheadPolicyCanRead()) {
446 if (DelayId mostAllowedId = mostBytesAllowed()) {
447 mostAllowedId.delayRead(aRead);
448 return;
f1ba1fba 449 }
e71eae24 450 }
f1ba1fba 451#endif
a46d2c0e 452 deferredReads.delayRead(aRead);
453}
454
455void
456MemObject::kickReads()
457{
458 deferredReads.kickReads(-1);
459}
460
#if USE_DELAY_POOLS
/// \returns the DelayId of the attached client with the largest current
/// byte allowance (a default DelayId if there are no clients)
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        // bytes this client could consume right now
        // (declared in the loop instead of at function scope)
        const int j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
5b55f1f1
CT
497
498int64_t
499MemObject::availableForSwapOut() const
500{
501 return endOffset() - swapout.queue_offset;
502}
f53969cc 503