]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
Source Format Enforcement (#532)
[thirdparty/squid.git] / src / MemObject.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 19 Store Memory Primitives */
10
11 #include "squid.h"
12 #include "comm/Connection.h"
13 #include "Generic.h"
14 #include "globals.h"
15 #include "HttpReply.h"
16 #include "MemBuf.h"
17 #include "MemObject.h"
18 #include "profiler/Profiler.h"
19 #include "SquidConfig.h"
20 #include "Store.h"
21 #include "StoreClient.h"
22
23 #if USE_DELAY_POOLS
24 #include "DelayPools.h"
25 #endif
26
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
/// Computes a 32-bit checksum of a URL for debug-time integrity checks
/// (see MemObject::checkUrlChecksum()): the first sizeof(unsigned int)
/// bytes of the URL's MD5 digest.
static unsigned int url_checksum(const char *url);
unsigned int
url_checksum(const char *url)
{
    unsigned int ck;
    SquidMD5_CTX M;
    // scratch buffer for the full 16-byte digest; static to avoid per-call
    // stack use -- NOTE(review): assumes single-threaded debug use, confirm
    static unsigned char digest[16];
    SquidMD5Init(&M);
    SquidMD5Update(&M, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &M);
    memcpy(&ck, digest, sizeof(ck));
    return ck;
}

#endif
44
45 RemovalPolicy * mem_policy = NULL;
46
47 size_t
48 MemObject::inUseCount()
49 {
50 return Pool().inUseCount();
51 }
52
53 const char *
54 MemObject::storeId() const
55 {
56 if (!storeId_.size()) {
57 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
58 dump();
59 storeId_ = "[unknown_URI]";
60 }
61 return storeId_.termedBuf();
62 }
63
64 const char *
65 MemObject::logUri() const
66 {
67 return logUri_.size() ? logUri_.termedBuf() : storeId();
68 }
69
70 bool
71 MemObject::hasUris() const
72 {
73 return storeId_.size();
74 }
75
76 void
77 MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
78 {
79 if (hasUris())
80 return;
81
82 storeId_ = aStoreId;
83 debugs(88, 3, this << " storeId: " << storeId_);
84
85 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
86 if (!aLogUri || aLogUri == aStoreId)
87 logUri_.clean(); // use storeId_ by default to minimize copying
88 else
89 logUri_ = aLogUri;
90
91 method = aMethod;
92
93 #if URL_CHECKSUM_DEBUG
94 chksum = url_checksum(urlXXX());
95 #endif
96 }
97
98 MemObject::MemObject()
99 {
100 debugs(20, 3, "MemObject constructed, this=" << this);
101 ping_reply_callback = nullptr;
102 memset(&start_ping, 0, sizeof(start_ping));
103 reply_ = new HttpReply;
104 }
105
MemObject::~MemObject()
{
    debugs(20, 3, "MemObject destructed, this=" << this);
    // enter a debugging context labeled with our URI (when known)
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        // by now the entry must be unlinked from the transient and
        // shared-memory cache tables, and any swapout must have ended
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    ctx_exit(ctx);              /* must exit before we free mem->url */
}
134
135 HttpReply &
136 MemObject::adjustableBaseReply()
137 {
138 assert(!updatedReply_);
139 return *reply_;
140 }
141
142 void
143 MemObject::replaceBaseReply(const HttpReplyPointer &r)
144 {
145 assert(r);
146 reply_ = r;
147 updatedReply_ = nullptr;
148 }
149
150 void
151 MemObject::write(const StoreIOBuffer &writeBuffer)
152 {
153 PROF_start(MemObject_write);
154 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
155
156 /* We don't separate out mime headers yet, so ensure that the first
157 * write is at offset 0 - where they start
158 */
159 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
160
161 assert (data_hdr.write (writeBuffer));
162 PROF_stop(MemObject_write);
163 }
164
/// logs this object's current state at DBG_IMPORTANT level (debugging aid)
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    // start_ping printed as seconds.microseconds (usec zero-padded to 6 digits)
    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
    debugs(20, DBG_IMPORTANT, "MemObject->updatedReply: " << updatedReply_);
    debugs(20, DBG_IMPORTANT, "MemObject->appliedUpdates: " << appliedUpdates);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
185
186 struct LowestMemReader : public unary_function<store_client, void> {
187 LowestMemReader(int64_t seed):current(seed) {}
188
189 void operator() (store_client const &x) {
190 if (x.memReaderHasLowerOffset(current))
191 current = x.copyInto.offset;
192 }
193
194 int64_t current;
195 };
196
197 struct StoreClientStats : public unary_function<store_client, void> {
198 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
199
200 void operator()(store_client const &x) {
201 x.dumpStats(where, index);
202 ++index;
203 }
204
205 MemBuf *where;
206 size_t index;
207 };
208
/// appends a human-readable summary of this object to mb
/// (used by cache manager reports)
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    // bytes actually written so far, when a swapout is in progress
    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);

    // one stats entry per attached store client
    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
233
234 int64_t
235 MemObject::endOffset () const
236 {
237 return data_hdr.endOffset();
238 }
239
240 void
241 MemObject::markEndOfReplyHeaders()
242 {
243 const int hdr_sz = endOffset();
244 assert(hdr_sz >= 0);
245 assert(reply_);
246 reply_->hdr_sz = hdr_sz;
247 }
248
249 int64_t
250 MemObject::size() const
251 {
252 if (object_sz < 0)
253 return endOffset();
254
255 return object_sz;
256 }
257
258 int64_t
259 MemObject::expectedReplySize() const
260 {
261 if (object_sz >= 0) {
262 debugs(20, 7, object_sz << " frozen by complete()");
263 return object_sz;
264 }
265
266 const auto hdr_sz = baseReply().hdr_sz;
267
268 // Cannot predict future length using an empty/unset or HTTP/0 reply.
269 // For any HTTP/1 reply, hdr_sz is positive -- status-line cannot be empty.
270 if (hdr_sz <= 0)
271 return -1;
272
273 const auto clen = baseReply().bodySize(method);
274 if (clen < 0) {
275 debugs(20, 7, "unknown; hdr: " << hdr_sz);
276 return -1;
277 }
278
279 const auto messageSize = clen + hdr_sz;
280 debugs(20, 7, messageSize << " hdr: " << hdr_sz << " clen: " << clen);
281 return messageSize;
282 }
283
284 void
285 MemObject::reset()
286 {
287 assert(swapout.sio == NULL);
288 data_hdr.freeContent();
289 inmem_lo = 0;
290 /* Should we check for clients? */
291 assert(reply_);
292 reply_->reset();
293 updatedReply_ = nullptr;
294 appliedUpdates = false;
295 }
296
297 int64_t
298 MemObject::lowestMemReaderOffset() const
299 {
300 LowestMemReader lowest (endOffset() + 1);
301
302 for_each <LowestMemReader>(clients, lowest);
303
304 return lowest.current;
305 }
306
307 /* XXX: This is wrong. It breaks *badly* on range combining */
308 bool
309 MemObject::readAheadPolicyCanRead() const
310 {
311 const auto savedHttpHeaders = baseReply().hdr_sz;
312 const bool canRead = endOffset() - savedHttpHeaders <
313 lowestMemReaderOffset() + Config.readAheadGap;
314
315 if (!canRead) {
316 debugs(19, 5, "no: " << endOffset() << '-' << savedHttpHeaders <<
317 " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
318 }
319
320 return canRead;
321 }
322
323 void
324 MemObject::addClient(store_client *aClient)
325 {
326 ++nclients;
327 dlinkAdd(aClient, &aClient->node, &clients);
328 }
329
#if URL_CHECKSUM_DEBUG
/// debug-build sanity check: the checksum recorded by setUris() must
/// still match the current URL
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
338
339 /*
340 * How much of the object data is on the disk?
341 */
342 int64_t
343 MemObject::objectBytesOnDisk() const
344 {
345 /*
346 * NOTE: storeOffset() represents the disk file size,
347 * not the amount of object data on disk.
348 *
349 * If we don't have at least 'swap_hdr_sz' bytes
350 * then none of the object data is on disk.
351 *
352 * This should still be safe if swap_hdr_sz == 0,
353 * meaning we haven't even opened the swapout file
354 * yet.
355 */
356
357 if (swapout.sio.getRaw() == NULL)
358 return 0;
359
360 int64_t nwritten = swapout.sio->offset();
361
362 if (nwritten <= (int64_t)swap_hdr_sz)
363 return 0;
364
365 return (nwritten - swap_hdr_sz);
366 }
367
368 int64_t
369 MemObject::policyLowestOffsetToKeep(bool swap) const
370 {
371 /*
372 * Careful. lowest_offset can be greater than endOffset(), such
373 * as in the case of a range request.
374 */
375 int64_t lowest_offset = lowestMemReaderOffset();
376
377 if (endOffset() < lowest_offset ||
378 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
379 (swap && !Config.onoff.memory_cache_first))
380 return lowest_offset;
381
382 return inmem_lo;
383 }
384
385 void
386 MemObject::trimSwappable()
387 {
388 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
389 /*
390 * We should only free up to what we know has been written
391 * to disk, not what has been queued for writing. Otherwise
392 * there will be a chunk of the data which is not in memory
393 * and is not yet on disk.
394 * The -1 makes sure the page isn't freed until storeSwapOut has
395 * walked to the next page.
396 */
397 int64_t on_disk;
398
399 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
400 new_mem_lo = on_disk - 1;
401
402 if (new_mem_lo == -1)
403 new_mem_lo = 0; /* the above might become -1 */
404
405 data_hdr.freeDataUpto(new_mem_lo);
406
407 inmem_lo = new_mem_lo;
408 }
409
410 void
411 MemObject::trimUnSwappable()
412 {
413 if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
414 assert (new_mem_lo > 0);
415 data_hdr.freeDataUpto(new_mem_lo);
416 inmem_lo = new_mem_lo;
417 } // else we should not trim anything at this time
418 }
419
420 bool
421 MemObject::isContiguous() const
422 {
423 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
424 /* XXX : make this higher level */
425 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
426 return result;
427 }
428
429 int
430 MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
431 {
432 #if USE_DELAY_POOLS
433 if (!ignoreDelayPools) {
434 /* identify delay id with largest allowance */
435 DelayId largestAllowance = mostBytesAllowed ();
436 return largestAllowance.bytesWanted(0, max);
437 }
438 #endif
439
440 return max;
441 }
442
443 void
444 MemObject::setNoDelay(bool const newValue)
445 {
446 #if USE_DELAY_POOLS
447
448 for (dlink_node *node = clients.head; node; node = node->next) {
449 store_client *sc = (store_client *) node->data;
450 sc->delayId.setNoDelay(newValue);
451 }
452
453 #endif
454 }
455
456 void
457 MemObject::delayRead(DeferredRead const &aRead)
458 {
459 #if USE_DELAY_POOLS
460 if (readAheadPolicyCanRead()) {
461 if (DelayId mostAllowedId = mostBytesAllowed()) {
462 mostAllowedId.delayRead(aRead);
463 return;
464 }
465 }
466 #endif
467 deferredReads.delayRead(aRead);
468 }
469
470 void
471 MemObject::kickReads()
472 {
473 deferredReads.kickReads(-1);
474 }
475
#if USE_DELAY_POOLS
/// the delay ID of the attached client with the largest current allowance
DelayId
MemObject::mostBytesAllowed() const
{
    DelayId result;
    int bestAllowance = -1;

    for (dlink_node *node = clients.head; node; node = node->next) {
        store_client *sc = (store_client *) node->data;
#if 0
        /* This test is invalid because the client may be writing data
         * and thus will want data immediately.
         * If we include the test, there is a race condition when too much
         * data is read - if all sc's are writing when a read is scheduled.
         * XXX: fixme.
         */

        if (!sc->callbackPending())
            /* not waiting for more data */
            continue;

#endif

        const int allowance = sc->delayId.bytesWanted(0, sc->copyInto.length);

        if (allowance > bestAllowance) {
            bestAllowance = allowance;
            result = sc->delayId;
        }
    }

    return result;
}

#endif
512
513 int64_t
514 MemObject::availableForSwapOut() const
515 {
516 return endOffset() - swapout.queue_offset;
517 }
518