/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */

#include "squid.h"
#include "comm/Connection.h"
#include "Generic.h"
#include "globals.h"
#include "HttpReply.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "StoreClient.h"

#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
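
/*
 * Example (illustrative): the checksum is simply the first
 * sizeof(unsigned int) bytes of the URL's MD5 digest, so equal URLs always
 * produce equal checksums while distinct URLs almost certainly differ. It
 * exists to catch in-place corruption of a stored URL during debugging
 * (see checkUrlChecksum() below), not to provide any security property.
 */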

RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]";
    }
    return storeId_.termedBuf();
}

const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}

bool
MemObject::hasUris() const
{
    return storeId_.size();
}

void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    storeId_ = aStoreId;

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}
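
// Example (hypothetical caller): storeCreateEntry(url, url, ...) hands the
// same pointer in as both the store ID and the log URI, so the fast-path
// branch above clears logUri_ and logUri() transparently falls back to
// storeId(), avoiding a second copy of the same string.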

MemObject::MemObject() :
    inmem_lo(0),
    nclients(0),
    smpCollapsed(false),
    ping_reply_callback(nullptr),
    ircb_data(nullptr),
    id(0),
    object_sz(-1),
    swap_hdr_sz(0),
#if URL_CHECKSUM_DEBUG
    chksum(0),
#endif
    vary_headers(nullptr)
{
    debugs(20, 3, "new MemObject " << this);
    memset(&start_ping, 0, sizeof(start_ping));
    memset(&abort, 0, sizeof(abort));
    reply_ = new HttpReply;
}

MemObject::~MemObject()
{
    debugs(20, 3, "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    ctx_exit(ctx);  /* must exit before we free mem->url */
}

void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert(data_hdr.endOffset() || writeBuffer.offset == 0);

    /* keep the side effect outside the assertion so the write still
     * happens if assertions are ever compiled out */
    const bool written = data_hdr.write(writeBuffer);
    assert(written);
    PROF_stop(MemObject_write);
}
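
// Example (hypothetical offsets): a fresh MemObject accepts
//     write({offset 0,   length 300})   // reply headers must land first
//     write({offset 300, length 1460})  // body bytes follow contiguously
// but a first write at a non-zero offset trips the assertion above, because
// mime headers are not stored separately from the entity yet.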

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "." << std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}

struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed): current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry): where(anEntry), index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where;
    size_t index;
};
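
// Usage sketch (mirrors the calls further down): these functors plug into
// the generic dlink-list visitor, e.g.
//     LowestMemReader lowest(endOffset() + 1);
//     for_each<LowestMemReader>(clients, lowest);
// which applies the functor to every attached store_client and leaves the
// smallest pending read offset in lowest.current.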

void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->appendf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset() const
{
    return data_hdr.endOffset();
}

void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(reply_);
    reply_->hdr_sz = hdr_sz;
}

int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (reply_) {
        const int64_t clen = reply_->bodySize(method);
        debugs(20, 7, "clen: " << clen);
        if (clen >= 0 && reply_->hdr_sz > 0) // yuck: Http::Message sets hdr_sz to 0
            return clen + reply_->hdr_sz;
    }

    return -1; // not enough information to predict
}
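
// Worked example (hypothetical reply): with a parsed Content-Length of 1000
// and reply_->hdr_sz == 250, expectedReplySize() predicts 1250 bytes total.
// If the body size is unknown (clen < 0) or the headers have not been parsed
// yet (hdr_sz == 0), it returns -1 rather than guess.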

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
    if (reply_)
        reply_->reset();
}

int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest(endOffset() + 1);

    for_each<LowestMemReader>(clients, lowest);

    return lowest.current;
}
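
// Example (hypothetical clients): with readers waiting at offsets 100 and
// 300, the result is 100. With no readers at all, the endOffset() + 1 seed
// comes back unchanged: a value just past the data already in memory.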

/* XXX: This is wrong. It breaks *badly* on range combining */
bool
MemObject::readAheadPolicyCanRead() const
{
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}
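
// Worked example (hypothetical numbers): with endOffset() == 16634,
// getReply()->hdr_sz == 250, lowestMemReaderOffset() == 0, and a 16 KB
// read_ahead_gap, the test is 16634 - 250 < 0 + 16384, i.e. 16384 < 16384,
// which is false: the slowest reader is already a full gap behind, so no
// more body bytes are fetched until it catches up.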

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum() const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}
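
// Worked example (hypothetical swapout state): with swap_hdr_sz == 100 and
// swapout.sio->offset() == 4096, the swap file holds 4096 - 100 = 3996 bytes
// of actual object data. With only 60 bytes written so far, nothing beyond
// the swap header has reached the disk, so the result is 0.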

int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
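
// Example (hypothetical state): with inmem_lo == 0, endOffset() == 8192, and
// maxInMemObjSize == 4096, the object has outgrown its in-memory allowance,
// so the method returns lowestMemReaderOffset() and everything below the
// slowest reader becomes trimmable. Otherwise it returns inmem_lo, meaning
// nothing new may be freed.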

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(true);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}
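
// Example (hypothetical swapout state): if readers would allow trimming up
// to offset 4096 but only 1000 bytes of object data are on disk, the cap
// above lowers new_mem_lo to 999, preserving the page storeSwapOut is still
// walking. Before anything is written, on_disk - 1 is -1, which the final
// check resets to 0 so no data is freed at all.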

void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert(new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange(Range<int64_t>(inmem_lo, endOffset()));
    /* XXX: make this higher level */
    debugs(19, result ? 4 : 3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}

void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

void
MemObject::delayRead(DeferredRead const &aRead)
{
#if USE_DELAY_POOLS
    if (readAheadPolicyCanRead()) {
        if (DelayId mostAllowedId = mostBytesAllowed()) {
            mostAllowedId.delayRead(aRead);
            return;
        }
    }
#endif
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

#if USE_DELAY_POOLS
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
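
// Example (hypothetical pools): if one waiting client's delay pool currently
// allows 0 bytes and another's allows 4096, the loop above settles on the
// second client's DelayId, so the next read is throttled by the most
// generous allowance among the attached clients.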

int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}