/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */

#include "squid.h"
#include "comm/Connection.h"
#include "Generic.h"
#include "globals.h"
#include "HttpReply.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "StoreClient.h"

#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif

RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

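// The Store key URI. Falls back to a placeholder (and dumps debugging
// state) if the ID was never set, which indicates a bug.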
const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]";
    }
    return storeId_.termedBuf();
}

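// URI to use in access logging; defaults to storeId() when no separate
// log URI has been set.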
const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}

bool
MemObject::hasUris() const
{
    return storeId_.size();
}

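// Records the store ID, log URI, and request method. Does nothing if the
// URIs have already been set.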
void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    if (hasUris())
        return;

    storeId_ = aStoreId;
    debugs(88, 3, this << " storeId: " << storeId_);

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}

MemObject::MemObject() :
    inmem_lo(0),
    nclients(0),
    smpCollapsed(false),
    ping_reply_callback(nullptr),
    ircb_data(nullptr),
    id(0),
    object_sz(-1),
    swap_hdr_sz(0),
#if URL_CHECKSUM_DEBUG
    chksum(0),
#endif
    vary_headers(nullptr)
{
    debugs(20, 3, "new MemObject " << this);
    memset(&start_ping, 0, sizeof(start_ping));
    memset(&abort, 0, sizeof(abort));
    reply_ = new HttpReply;
}

MemObject::~MemObject()
{
    debugs(20, 3, "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    ctx_exit(ctx); /* must exit before we free mem->url */
}

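// Appends the given buffer of reply data to the in-memory store (data_hdr).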
void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert (data_hdr.endOffset() || writeBuffer.offset == 0);

    assert (data_hdr.write (writeBuffer));
    PROF_stop(MemObject_write);
}

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "." << std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}

struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where;
    size_t index;
};

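// Appends a human-readable summary of this object's state to mb:
// method, log URI, in-memory offsets, swapout progress, cache indexes,
// and per-client statistics.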
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->appendf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}

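// Records the amount of data written so far (endOffset()) as the size of
// the reply headers.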
void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(reply_);
    reply_->hdr_sz = hdr_sz;
}

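// Known object size once complete() has set object_sz; otherwise the
// number of bytes received so far.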
int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (reply_) {
        const int64_t clen = reply_->bodySize(method);
        debugs(20, 7, "clen: " << clen);
        if (clen >= 0 && reply_->hdr_sz > 0) // yuck: Http::Message sets hdr_sz to 0
            return clen + reply_->hdr_sz;
    }

    return -1; // not enough information to predict
}

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
    if (reply_)
        reply_->reset();
}

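// The lowest in-memory offset that any attached client still needs.
// Seeded with endOffset() + 1 so an object with no memory readers reports
// an offset just past the end of the stored data.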
int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest (endOffset() + 1);

    for_each <LowestMemReader>(clients, lowest);

    return lowest.current;
}

/* XXX: This is wrong. It breaks *badly* on range combining */
bool
MemObject::readAheadPolicyCanRead() const
{
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}

int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(true);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}

void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert (new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
    /* XXX : make this higher level */
    debugs (19, result ? 4 : 3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

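// With delay pools enabled (and not ignored), limits max to what the most
// generous client delay ID currently allows; otherwise returns max as-is.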
int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed ();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}

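// Propagates the no-delay setting to every attached client's delay ID.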
void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

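// Queues a read: if the read-ahead policy permits reading and a delay
// pool grants some allowance, the read is handed to that delay ID;
// otherwise it is deferred locally (see kickReads()).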
void
MemObject::delayRead(DeferredRead const &aRead)
{
#if USE_DELAY_POOLS
    if (readAheadPolicyCanRead()) {
        if (DelayId mostAllowedId = mostBytesAllowed()) {
            mostAllowedId.delayRead(aRead);
            return;
        }
    }
#endif
    deferredReads.delayRead(aRead);
}

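// Resumes reads previously deferred by delayRead().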
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

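// Returns the delay ID, among all attached clients, that currently
// permits the largest read.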
#if USE_DELAY_POOLS
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif

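// Bytes accumulated in memory but not yet queued for swapping out.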
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}
