/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 19    Store Memory Primitives */

#include "squid.h"
#include "comm/Connection.h"
#include "Generic.h"
#include "globals.h"
#include "HttpReply.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "StoreClient.h"

#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
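/*
 * Debug-only helper: computes an MD5 digest of the given URL and folds
 * its leading sizeof(unsigned int) bytes into a checksum. The checksum
 * is captured in setUris() and re-verified by checkUrlChecksum() to
 * catch in-memory corruption of the stored URL.
 */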
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif

RemovalPolicy * mem_policy = NULL;

size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}

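/*
 * The identifier used to index this object in the store. A missing ID
 * indicates a bug, so we log it, dump the object state, and fall back
 * to a recognizable placeholder rather than returning nil.
 */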
const char *
MemObject::storeId() const
{
    if (!storeId_.size()) {
        debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
        dump();
        storeId_ = "[unknown_URI]";
    }
    return storeId_.termedBuf();
}

const char *
MemObject::logUri() const
{
    return logUri_.size() ? logUri_.termedBuf() : storeId();
}

bool
MemObject::hasUris() const
{
    return storeId_.size();
}

void
MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
{
    storeId_ = aStoreId;

    // fast pointer comparison for a common storeCreateEntry(url,url,...) case
    if (!aLogUri || aLogUri == aStoreId)
        logUri_.clean(); // use storeId_ by default to minimize copying
    else
        logUri_ = aLogUri;

    method = aMethod;

#if URL_CHECKSUM_DEBUG
    chksum = url_checksum(urlXXX());
#endif
}

MemObject::MemObject() :
    inmem_lo(0),
    nclients(0),
    smpCollapsed(false),
    request(nullptr),
    ping_reply_callback(nullptr),
    ircb_data(nullptr),
    id(0),
    object_sz(-1),
    swap_hdr_sz(0),
#if URL_CHECKSUM_DEBUG
    chksum(0),
#endif
    vary_headers(nullptr)
{
    debugs(20, 3, "new MemObject " << this);
    memset(&start_ping, 0, sizeof(start_ping));
    memset(&abort, 0, sizeof(abort));
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply);
}

MemObject::~MemObject()
{
    debugs(20, 3, "del MemObject " << this);
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);
#endif

    HTTPMSGUNLOCK(_reply);

    ctx_exit(ctx); /* must exit before we free mem->url */
}

void
MemObject::write(const StoreIOBuffer &writeBuffer)
{
    PROF_start(MemObject_write);
    debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);

    /* We don't separate out mime headers yet, so ensure that the first
     * write is at offset 0 - where they start
     */
    assert(data_hdr.endOffset() || writeBuffer.offset == 0);

    assert(data_hdr.write(writeBuffer));
    PROF_stop(MemObject_write);
}

void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "." << std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}

HttpReply const *
MemObject::getReply() const
{
    return _reply;
}

void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply);
    _reply = newrep;
    HTTPMSGLOCK(_reply);
}

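/*
 * Function objects applied to every store_client on the clients list
 * via for_each(): LowestMemReader tracks the smallest in-memory read
 * offset among clients, and StoreClientStats appends per-client
 * statistics to a MemBuf.
 */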
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed) : current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current;
};

struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry) : where(anEntry), index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where;
    size_t index;
};

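/* Append a human-readable summary of this object's state to mb. */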
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->appendf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}

int64_t
MemObject::endOffset() const
{
    return data_hdr.endOffset();
}

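/*
 * Marks the point where the reply headers end: everything stored so
 * far is assumed to be header bytes, so the current endOffset() is
 * recorded as the reply's hdr_sz.
 */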
void
MemObject::markEndOfReplyHeaders()
{
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}

int64_t
MemObject::size() const
{
    if (object_sz < 0)
        return endOffset();

    return object_sz;
}

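/*
 * Best-effort prediction of the total reply size (headers plus body).
 * Once the object is complete, object_sz is exact. Before that, a
 * prediction is possible only when both the body size and the parsed
 * header size are known; for example (illustrative numbers), a reply
 * with a 1000-byte body and a 250-byte header yields 1250. Otherwise
 * we return -1.
 */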
int64_t
MemObject::expectedReplySize() const
{
    debugs(20, 7, HERE << "object_sz: " << object_sz);
    if (object_sz >= 0) // complete() has been called; we know the exact answer
        return object_sz;

    if (_reply) {
        const int64_t clen = _reply->bodySize(method);
        debugs(20, 7, HERE << "clen: " << clen);
        if (clen >= 0 && _reply->hdr_sz > 0) // yuck: Http::Message sets hdr_sz to 0
            return clen + _reply->hdr_sz;
    }

    return -1; // not enough information to predict
}

void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}

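/*
 * The lowest in-memory offset that any attached client may still read.
 * Seeding with endOffset() + 1 means that, with no lagging readers,
 * everything currently in memory is considered consumed.
 */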
int64_t
MemObject::lowestMemReaderOffset() const
{
    LowestMemReader lowest(endOffset() + 1);

    for_each<LowestMemReader>(clients, lowest);

    return lowest.current;
}

/* XXX: This is wrong. It breaks *badly* on range combining */
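/*
 * Allow reading more only while the bytes accumulated beyond the reply
 * headers stay within read_ahead_gap of the slowest reader. Roughly,
 * with endOffset() 5000, hdr_sz 300, lowestMemReaderOffset() 4000, and
 * a 16 KB gap (illustrative numbers): 4700 < 4000 + 16384, so reading
 * may continue.
 */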
bool
MemObject::readAheadPolicyCanRead() const
{
    const bool canRead = endOffset() - getReply()->hdr_sz <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}

void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}

#if URL_CHECKSUM_DEBUG
void
MemObject::checkUrlChecksum() const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif

/*
 * How much of the object data is on the disk?
 */
int64_t
MemObject::objectBytesOnDisk() const
{
    /*
     * NOTE: storeOffset() represents the disk file size,
     * not the amount of object data on disk.
     *
     * If we don't have at least 'swap_hdr_sz' bytes
     * then none of the object data is on disk.
     *
     * This should still be safe if swap_hdr_sz == 0,
     * meaning we haven't even opened the swapout file
     * yet.
     */

    if (swapout.sio.getRaw() == NULL)
        return 0;

    int64_t nwritten = swapout.sio->offset();

    if (nwritten <= (int64_t)swap_hdr_sz)
        return 0;

    return (nwritten - swap_hdr_sz);
}

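/*
 * The lowest in-memory offset the trimming policy wants to keep; data
 * below the returned offset may be freed. We may trim up to the
 * slowest reader when readers are past the current end (as with range
 * requests), when the in-memory window exceeds
 * Config.Store.maxInMemObjSize, or when swapping out with
 * memory_cache_first disabled; otherwise we keep from inmem_lo.
 */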
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}

void
MemObject::trimSwappable()
{
    int64_t new_mem_lo = policyLowestOffsetToKeep(true);
    /*
     * We should only free up to what we know has been written
     * to disk, not what has been queued for writing. Otherwise
     * there will be a chunk of the data which is not in memory
     * and is not yet on disk.
     * The -1 makes sure the page isn't freed until storeSwapOut has
     * walked to the next page.
     */
    int64_t on_disk;

    if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
        new_mem_lo = on_disk - 1;

    if (new_mem_lo == -1)
        new_mem_lo = 0; /* the above might become -1 */

    data_hdr.freeDataUpto(new_mem_lo);

    inmem_lo = new_mem_lo;
}

void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert(new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}

bool
MemObject::isContiguous() const
{
    bool result = data_hdr.hasContigousContentRange(Range<int64_t>(inmem_lo, endOffset()));
    /* XXX: make this higher level */
    debugs(19, result ? 4 : 3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
    return result;
}

int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed();
        return largestAllowance.bytesWanted(0, max);
    }
#endif

    return max;
}

void
MemObject::setNoDelay(bool const newValue)
{
#if USE_DELAY_POOLS

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
        sc->delayId.setNoDelay(newValue);
    }

#endif
}

void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}

void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}

#if USE_DELAY_POOLS
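/*
 * Scan all attached store_clients and return the DelayId that would
 * currently allow the most bytes, so reads are paced by the least
 * constrained client.
 */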
DelayId
MemObject::mostBytesAllowed() const
{
    int j;
    int jmax = -1;
    DelayId result;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;
#endif

        j = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (j > jmax) {
            jmax = j;
            result = sc->delayId;
        }
    }

    return result;
}

#endif

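/*
 * Bytes that have been appended in memory but not yet queued for
 * writing to disk.
 */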
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}