/*
 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19 Store Memory Primitives */

#include "squid.h"
#include "comm/Connection.h"
#include "Generic.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "StoreClient.h"

#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
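
/*
 * Illustrative note (not part of the original file): url_checksum()
 * folds the first sizeof(unsigned int) bytes of the URL's MD5 digest
 * into an int, so equal URLs always yield equal checksums:
 *
 *   const unsigned int a = url_checksum("http://example.com/");
 *   const unsigned int b = url_checksum("http://example.com/");
 *   assert(a == b); // same URL, same digest prefix, same checksum
 *
 * checkUrlChecksum() below relies on this property to detect in-place
 * corruption of the stored URL between setUris() and destruction.
 */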

RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]";
    }
    return storeId_.termedBuf();
}

const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}

bool
MemObject::hasUris() const
{
    return storeId_.size();
}

void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    storeId_ = aStoreId;

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}
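
/*
 * Usage sketch (hypothetical caller, for illustration only): passing
 * the same pointer for both URIs triggers the fast path above, so
 * logUri_ stays empty and logUri() falls back to storeId():
 *
 *   const char *url = "http://example.com/index.html";
 *   mem->setUris(url, url, HttpRequestMethod(Http::METHOD_GET));
 *   assert(strcmp(mem->logUri(), mem->storeId()) == 0);
 */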

MemObject::MemObject() :
    inmem_lo(0),
    nclients(0),
    smpCollapsed(false),
    request(nullptr),
    ping_reply_callback(nullptr),
    ircb_data(nullptr),
    id(0),
    object_sz(-1),
    swap_hdr_sz(0),
#if URL_CHECKSUM_DEBUG
    chksum(0),
#endif
    vary_headers(nullptr)
{
    debugs(20, 3, "new MemObject " << this);
    memset(&start_ping, 0, sizeof(start_ping));
    memset(&abort, 0, sizeof(abort));
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply);
}

MemObject::~MemObject()
{
    debugs(20, 3, "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx); /* must exit before we free mem->url */
}

void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}

void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert(data_hdr.endOffset() || writeBuffer.offset == 0);

    assert(data_hdr.write(writeBuffer));
    PROF_stop(MemObject_write);
}
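
/*
 * Illustrative call sequence (hypothetical hdrBuf/bodyBuf buffers):
 * the first write must land at offset 0 because the reply headers are
 * stored inline ahead of the body; later writes append contiguously:
 *
 *   StoreIOBuffer headers(hdrLen, 0, hdrBuf);     // offset 0 required
 *   mem->write(headers);
 *   StoreIOBuffer body(bodyLen, hdrLen, bodyBuf); // appended after
 *   mem->write(body);
 */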

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "." << std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}

HttpReply const *
MemObject::getReply() const
{
    return _reply;
}

void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}

struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed): current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry): where(anEntry), index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where;
    size_t index;
};

void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->appendf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset() const
{
    return data_hdr.endOffset();
}

void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}

int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, HERE << "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (_reply) {
        const int64_t clen = _reply->bodySize(method);
        debugs(20, 7, HERE << "clen: " << clen);
        if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
            return clen + _reply->hdr_sz;
    }

    return -1; // not enough information to predict
}
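
/*
 * Worked example (illustrative numbers): for a GET reply carrying
 * "Content-Length: 1000" with 350 bytes of parsed headers, bodySize()
 * yields 1000 and hdr_sz is 350, so expectedReplySize() predicts 1350.
 * Before the headers are parsed (hdr_sz == 0) or without a known body
 * size (clen < 0), it returns -1 rather than guessing.
 */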

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}

int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest(endOffset() + 1);

    for_each<LowestMemReader>(clients, lowest);

    return lowest.current;
}
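
/*
 * Note on the seed above: endOffset() + 1 is deliberately past all
 * buffered data, so with no attached readers the result exceeds
 * endOffset() and the trim policies may discard everything in memory;
 * any real reader with a lower copyInto.offset pulls the result down.
 */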

/* XXX: This is wrong. It breaks *badly* on range combining */
bool
MemObject::readAheadPolicyCanRead() const
{
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}
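
/*
 * Worked example (illustrative numbers): with endOffset() == 65536,
 * 1024 bytes of headers, the slowest reader at offset 51200, and
 * read_ahead_gap 16384, the test is 65536 - 1024 = 64512 <
 * 51200 + 16384 = 67584, so reading ahead is still allowed; once the
 * buffered body runs a full gap ahead of the slowest reader, it stops.
 */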

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum() const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}
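
/*
 * Worked example (illustrative numbers): with nwritten == 4096 bytes
 * in the swapout file and a swap_hdr_sz of 100 bytes of metadata,
 * 3996 bytes of object data are on disk; while nwritten <= swap_hdr_sz
 * only metadata has been written, so the answer is 0.
 */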

int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
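
/*
 * In other words: keep data from inmem_lo onward unless (a) every
 * reader is already past endOffset(), as happens with range requests,
 * (b) the in-memory window has outgrown Config.Store.maxInMemObjSize,
 * or (c) we are swapping out and Config.onoff.memory_cache_first is
 * off; in those cases everything below the slowest reader may go.
 */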

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(true);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}
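
/*
 * Worked example (illustrative numbers): if readers allow trimming up
 * to offset 8192 but only on_disk == 4096 bytes have been written,
 * new_mem_lo is capped at 4095, keeping the page under byte 4095
 * until storeSwapOut moves on; with nothing written yet (on_disk == 0)
 * the cap becomes -1 and is reset to 0, so nothing is freed.
 */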

void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert(new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange(Range<int64_t>(inmem_lo, endOffset()));
    /* XXX: make this higher level */
    debugs(19, result ? 4 : 3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}

void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

#if USE_DELAY_POOLS
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
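
/*
 * Selection sketch (illustrative numbers): mostBytesAllowed() is a
 * linear scan over clients; if three readers' delay pools currently
 * permit 0, 300, and 1200 bytes, the DelayId permitting 1200 wins,
 * and mostBytesWanted() above then clamps the caller's max against
 * that allowance via bytesWanted(0, max).
 */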

int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}
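
/*
 * Worked example for availableForSwapOut() above (illustrative
 * numbers): with endOffset() == 4096 bytes buffered and
 * swapout.queue_offset == 1024 bytes already queued for disk I/O,
 * 3072 more bytes are available to queue for swapout.
 */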