]> git.ipfire.org Git - thirdparty/squid.git/blame - src/MemObject.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / MemObject.cc
CommitLineData
528b2c61 1/*
bde978a6 2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
528b2c61 3 *
bbc27441
AJ
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
528b2c61 7 */
8
bbc27441
AJ
9/* DEBUG: section 19 Store Memory Primitives */
10
582c2af2 11#include "squid.h"
3e4bebf8 12#include "comm/Connection.h"
582c2af2 13#include "Generic.h"
af69c635 14#include "globals.h"
528b2c61 15#include "HttpReply.h"
582c2af2
FC
16#include "HttpRequest.h"
17#include "MemBuf.h"
18#include "MemObject.h"
19#include "profiler/Profiler.h"
4d5904f7 20#include "SquidConfig.h"
528b2c61 21#include "Store.h"
22#include "StoreClient.h"
582c2af2 23
9a0a18de 24#if USE_DELAY_POOLS
b67e2c8c 25#include "DelayPools.h"
26#endif
528b2c61 27
28/* TODO: make this global or private */
29#if URL_CHECKSUM_DEBUG
30static unsigned int url_checksum(const char *url);
31unsigned int
32url_checksum(const char *url)
33{
34 unsigned int ck;
c3031d67 35 SquidMD5_CTX M;
528b2c61 36 static unsigned char digest[16];
c3031d67 37 SquidMD5Init(&M);
38 SquidMD5Update(&M, (unsigned char *) url, strlen(url));
39 SquidMD5Final(digest, &M);
41d00cd3 40 memcpy(&ck, digest, sizeof(ck));
528b2c61 41 return ck;
42}
62e76326 43
528b2c61 44#endif
45
aa839030 46RemovalPolicy * mem_policy = NULL;
47
528b2c61 48size_t
49MemObject::inUseCount()
50{
9f9e06f3 51 return Pool().inUseCount();
528b2c61 52}
53
c877c0bc 54const char *
9d4e9cfb
AR
55MemObject::storeId() const
56{
dcd84f80 57 if (!storeId_.size()) {
c877c0bc
AR
58 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
59 dump();
60 storeId_ = "[unknown_URI]";
61 }
62 return storeId_.termedBuf();
9487bae9
AR
63}
64
c877c0bc 65const char *
9d4e9cfb
AR
66MemObject::logUri() const
67{
dcd84f80 68 return logUri_.size() ? logUri_.termedBuf() : storeId();
c877c0bc 69}
4a56ee8d 70
c877c0bc 71bool
9d4e9cfb
AR
72MemObject::hasUris() const
73{
dcd84f80 74 return storeId_.size();
c877c0bc 75}
4a56ee8d 76
c877c0bc
AR
77void
78MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
79{
80 storeId_ = aStoreId;
81
82 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
9d4e9cfb 83 if (!aLogUri || aLogUri == aStoreId)
c877c0bc
AR
84 logUri_.clean(); // use storeId_ by default to minimize copying
85 else
86 logUri_ = aLogUri;
62e76326 87
c877c0bc 88 method = aMethod;
4a56ee8d 89
c877c0bc
AR
90#if URL_CHECKSUM_DEBUG
91 chksum = url_checksum(urlXXX());
528b2c61 92#endif
c877c0bc 93}
62e76326 94
c877c0bc
AR
95MemObject::MemObject(): smpCollapsed(false)
96{
97 debugs(20, 3, HERE << "new MemObject " << this);
98 _reply = new HttpReply;
99 HTTPMSGLOCK(_reply);
4a56ee8d 100
528b2c61 101 object_sz = -1;
4a56ee8d 102
528b2c61 103 /* XXX account log_url */
ddc9b32c
AR
104
105 swapout.decision = SwapOut::swNeedsCheck;
528b2c61 106}
107
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    // enter a debugging context labeled with this object's URL (if known)
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    // sanity checks: by destruction time we must be detached from the
    // transient/mem-cache tables and have no pending swap-out I/O
    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    // release our references to the reply and request messages
    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx); /* must exit before we free mem->url */

    safe_free(vary_headers);
}
142
/// drop (and unlock) our reference to the client request, if any
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
148
/// Append response bytes to the in-memory data header.
/// \param writeBuffer bytes and their absolute object offset
void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert (data_hdr.endOffset() || writeBuffer.offset == 0);

    // NOTE: the write() call has a required side effect and lives inside
    // assert(); presumably Squid's assert is always compiled in — confirm
    assert (data_hdr.write (writeBuffer));
    PROF_stop(MemObject_write);
}
163
/// emit a cache.log snapshot of this object's state (used when reporting bugs)
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
182
/// \return the current (never nil) HTTP reply; see replaceHttpReply()
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
188
/// Replace the stored reply with newrep, releasing the old reference and
/// locking the new one. Order matters: unlock old, assign, lock new.
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}
196
26ac0430
AJ
197struct LowestMemReader : public unary_function<store_client, void> {
198 LowestMemReader(int64_t seed):current(seed) {}
62e76326 199
26ac0430 200 void operator() (store_client const &x) {
62e76326 201 if (x.memReaderHasLowerOffset(current))
202 current = x.copyInto.offset;
203 }
204
47f6e231 205 int64_t current;
528b2c61 206};
207
26ac0430
AJ
208struct StoreClientStats : public unary_function<store_client, void> {
209 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
62e76326 210
26ac0430 211 void operator()(store_client const &x) {
aec55359
FC
212 x.dumpStats(where, index);
213 ++index;
528b2c61 214 }
62e76326 215
fcc35180 216 MemBuf *where;
528b2c61 217 size_t index;
218};
219
/// append a human-readable description of this object to mb (mgr reports)
void
MemObject::stat(MemBuf * mb) const
{
    mb->Printf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (vary_headers)
        mb->Printf("\tvary_headers: %s\n", vary_headers);
    mb->Printf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->Printf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->Printf("\tswapout: %" PRId64 " bytes queued\n",
               swapout.queue_offset);

    // swap-out progress is only meaningful once a store I/O exists
    if (swapout.sio.getRaw())
        mb->Printf("\tswapout: %" PRId64 " bytes written\n",
                   (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->Printf("\ttransient index: %d state: %d\n",
                   xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->Printf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n",
                   memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->Printf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->Printf("\tsmp-collapsed\n");

    // finally, one line per attached store client
    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
250
/// \return the highest in-memory byte offset (headers + body received so far)
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
256
3756e5c0
AR
257void
258MemObject::markEndOfReplyHeaders()
259{
260 const int hdr_sz = endOffset();
261 assert(hdr_sz >= 0);
262 assert(_reply);
263 _reply->hdr_sz = hdr_sz;
264}
265
47f6e231 266int64_t
528b2c61 267MemObject::size() const
268{
62e76326 269 if (object_sz < 0)
270 return endOffset();
271
528b2c61 272 return object_sz;
273}
274
aa1a691e 275int64_t
9199139f
AR
276MemObject::expectedReplySize() const
277{
aa1a691e
AR
278 debugs(20, 7, HERE << "object_sz: " << object_sz);
279 if (object_sz >= 0) // complete() has been called; we know the exact answer
280 return object_sz;
281
282 if (_reply) {
283 const int64_t clen = _reply->bodySize(method);
284 debugs(20, 7, HERE << "clen: " << clen);
285 if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
286 return clen + _reply->hdr_sz;
287 }
288
289 return -1; // not enough information to predict
290}
291
/// discard all buffered content and rewind inmem_lo; requires no
/// swap-out I/O in progress
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
300
47f6e231 301int64_t
528b2c61 302MemObject::lowestMemReaderOffset() const
303{
304 LowestMemReader lowest (endOffset() + 1);
305
4cbb7fa8 306 for_each <LowestMemReader>(clients, lowest);
62e76326 307
528b2c61 308 return lowest.current;
309}
310
311/* XXX: This is wrong. It breaks *badly* on range combining */
312bool
313MemObject::readAheadPolicyCanRead() const
314{
f54986ad 315 const bool canRead = endOffset() - getReply()->hdr_sz <
9d4e9cfb 316 lowestMemReaderOffset() + Config.readAheadGap;
f54986ad
AR
317
318 if (!canRead) {
319 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
320 " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
321 }
322
323 return canRead;
528b2c61 324}
325
326void
327MemObject::addClient(store_client *aClient)
328{
329 ++nclients;
330 dlinkAdd(aClient, &aClient->node, &clients);
331}
332
333#if URL_CHECKSUM_DEBUG
334void
335MemObject::checkUrlChecksum () const
336{
c877c0bc 337 assert(chksum == url_checksum(urlXXX()));
528b2c61 338}
62e76326 339
528b2c61 340#endif
341
342/*
343 * How much of the object data is on the disk?
344 */
47f6e231 345int64_t
528b2c61 346MemObject::objectBytesOnDisk() const
347{
348 /*
349 * NOTE: storeOffset() represents the disk file size,
350 * not the amount of object data on disk.
26ac0430 351 *
528b2c61 352 * If we don't have at least 'swap_hdr_sz' bytes
353 * then none of the object data is on disk.
354 *
355 * This should still be safe if swap_hdr_sz == 0,
356 * meaning we haven't even opened the swapout file
357 * yet.
358 */
62e76326 359
528b2c61 360 if (swapout.sio.getRaw() == NULL)
62e76326 361 return 0;
362
47f6e231 363 int64_t nwritten = swapout.sio->offset();
62e76326 364
ed013b6c 365 if (nwritten <= (int64_t)swap_hdr_sz)
62e76326 366 return 0;
367
47f6e231 368 return (nwritten - swap_hdr_sz);
528b2c61 369}
370
47f6e231 371int64_t
10aeba1d 372MemObject::policyLowestOffsetToKeep(bool swap) const
528b2c61 373{
374 /*
375 * Careful. lowest_offset can be greater than endOffset(), such
376 * as in the case of a range request.
377 */
47f6e231 378 int64_t lowest_offset = lowestMemReaderOffset();
62e76326 379
528b2c61 380 if (endOffset() < lowest_offset ||
ff4b33f4 381 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
10aeba1d 382 (swap && !Config.onoff.memory_cache_first))
62e76326 383 return lowest_offset;
384
528b2c61 385 return inmem_lo;
386}
387
388void
389MemObject::trimSwappable()
390{
10aeba1d 391 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
528b2c61 392 /*
393 * We should only free up to what we know has been written
394 * to disk, not what has been queued for writing. Otherwise
395 * there will be a chunk of the data which is not in memory
396 * and is not yet on disk.
397 * The -1 makes sure the page isn't freed until storeSwapOut has
aa1a691e 398 * walked to the next page.
528b2c61 399 */
47f6e231 400 int64_t on_disk;
62e76326 401
528b2c61 402 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
62e76326 403 new_mem_lo = on_disk - 1;
404
528b2c61 405 if (new_mem_lo == -1)
f53969cc 406 new_mem_lo = 0; /* the above might become -1 */
62e76326 407
528b2c61 408 data_hdr.freeDataUpto(new_mem_lo);
62e76326 409
528b2c61 410 inmem_lo = new_mem_lo;
411}
412
413void
414MemObject::trimUnSwappable()
415{
99921d9d
AR
416 if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
417 assert (new_mem_lo > 0);
418 data_hdr.freeDataUpto(new_mem_lo);
419 inmem_lo = new_mem_lo;
420 } // else we should not trim anything at this time
528b2c61 421}
422
528b2c61 423bool
424MemObject::isContiguous() const
425{
47f6e231 426 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
528b2c61 427 /* XXX : make this higher level */
bf8fe701 428 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
528b2c61 429 return result;
430}
b67e2c8c 431
432int
384a7590 433MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
b67e2c8c 434{
9a0a18de 435#if USE_DELAY_POOLS
384a7590
JP
436 if (!ignoreDelayPools) {
437 /* identify delay id with largest allowance */
438 DelayId largestAllowance = mostBytesAllowed ();
439 return largestAllowance.bytesWanted(0, max);
440 }
441#endif
62e76326 442
b67e2c8c 443 return max;
b67e2c8c 444}
445
a46d2c0e 446void
447MemObject::setNoDelay(bool const newValue)
448{
9a0a18de 449#if USE_DELAY_POOLS
a46d2c0e 450
451 for (dlink_node *node = clients.head; node; node = node->next) {
452 store_client *sc = (store_client *) node->data;
453 sc->delayId.setNoDelay(newValue);
454 }
455
456#endif
457}
458
/// queue a read to be retried later (e.g., when more data or quota arrives)
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
464
/// resume all deferred reads (-1: no limit on how many to kick)
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
470
#if USE_DELAY_POOLS
/// \return the delay ID of the attached client currently allowed the most
/// bytes (a probe; no quota is consumed)
DelayId
MemObject::mostBytesAllowed() const
{
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        // probe this client's allowance (min 0, capped by its copy buffer)
        const int j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
/// \return the number of buffered bytes not yet queued for swap-out
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}
f53969cc 513