]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemObject.cc
Preserve caller context across (and improve) deferred reads (#1025)
[thirdparty/squid.git] / src / MemObject.cc
1 /*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 19 Store Memory Primitives */
10
11 #include "squid.h"
12 #include "comm/Connection.h"
13 #include "Generic.h"
14 #include "globals.h"
15 #include "HttpReply.h"
16 #include "MemBuf.h"
17 #include "MemObject.h"
18 #include "SquidConfig.h"
19 #include "Store.h"
20 #include "StoreClient.h"
21
22 #if USE_DELAY_POOLS
23 #include "DelayPools.h"
24 #endif
25
/* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Computes a 32-bit checksum of the given URL string (the first four bytes
/// of its MD5 digest), used to detect in-memory corruption of stored URLs.
unsigned int
url_checksum(const char *url)
{
    static unsigned char md5Digest[16];
    SquidMD5_CTX md5State;
    SquidMD5Init(&md5State);
    SquidMD5Update(&md5State, (unsigned char *) url, strlen(url));
    SquidMD5Final(md5Digest, &md5State);
    unsigned int checksum;
    memcpy(&checksum, md5Digest, sizeof(checksum));
    return checksum;
}

#endif
43
44 RemovalPolicy * mem_policy = NULL;
45
46 size_t
47 MemObject::inUseCount()
48 {
49 return Pool().inUseCount();
50 }
51
52 const char *
53 MemObject::storeId() const
54 {
55 if (!storeId_.size()) {
56 debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: Missing MemObject::storeId value");
57 dump();
58 storeId_ = "[unknown_URI]";
59 }
60 return storeId_.termedBuf();
61 }
62
63 const char *
64 MemObject::logUri() const
65 {
66 return logUri_.size() ? logUri_.termedBuf() : storeId();
67 }
68
69 bool
70 MemObject::hasUris() const
71 {
72 return storeId_.size();
73 }
74
75 void
76 MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
77 {
78 if (hasUris())
79 return;
80
81 storeId_ = aStoreId;
82 debugs(88, 3, this << " storeId: " << storeId_);
83
84 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
85 if (!aLogUri || aLogUri == aStoreId)
86 logUri_.clean(); // use storeId_ by default to minimize copying
87 else
88 logUri_ = aLogUri;
89
90 method = aMethod;
91
92 #if URL_CHECKSUM_DEBUG
93 chksum = url_checksum(urlXXX());
94 #endif
95 }
96
MemObject::MemObject()
{
    debugs(20, 3, "MemObject constructed, this=" << this);
    // raw function pointer member: must be cleared explicitly
    ping_reply_callback = nullptr;
    memset(&start_ping, 0, sizeof(start_ping));
    // every MemObject starts with an allocated (but empty) base reply
    reply_ = new HttpReply;
}
104
MemObject::~MemObject()
{
    debugs(20, 3, "MemObject destructed, this=" << this);

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        // by now the object must have been detached from the transient table,
        // the shared memory cache, and any ongoing swapout
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();
}
121
/// Allows the caller to modify the stored base reply directly.
/// Only safe before any updated reply exists (see replaceBaseReply()).
HttpReply &
MemObject::adjustableBaseReply()
{
    assert(!updatedReply_);
    return *reply_;
}
128
/// Replaces the stored base reply with the given (non-nil) one.
void
MemObject::replaceBaseReply(const HttpReplyPointer &r)
{
    assert(r);
    reply_ = r;
    // any previously recorded header updates applied to the old reply only
    updatedReply_ = nullptr;
}
136
137 void
138 MemObject::write(const StoreIOBuffer &writeBuffer)
139 {
140 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
141
142 /* We don't separate out mime headers yet, so ensure that the first
143 * write is at offset 0 - where they start
144 */
145 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
146
147 assert (data_hdr.write (writeBuffer));
148 }
149
/// Reports this object's state at DBG_IMPORTANT level, for debugging
/// (e.g., when storeId() discovers a missing store ID).
void
MemObject::dump() const
{
    data_hdr.dump();

    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << reply_);
    debugs(20, DBG_IMPORTANT, "MemObject->updatedReply: " << updatedReply_);
    debugs(20, DBG_IMPORTANT, "MemObject->appliedUpdates: " << appliedUpdates);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
166
167 struct LowestMemReader : public unary_function<store_client, void> {
168 LowestMemReader(int64_t seed):current(seed) {}
169
170 void operator() (store_client const &x) {
171 if (x.memReaderHasLowerOffset(current))
172 current = x.copyInto.offset;
173 }
174
175 int64_t current;
176 };
177
178 struct StoreClientStats : public unary_function<store_client, void> {
179 StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}
180
181 void operator()(store_client const &x) {
182 x.dumpStats(where, index);
183 ++index;
184 }
185
186 MemBuf *where;
187 size_t index;
188 };
189
/// Appends a human-readable description of this object's state to mb,
/// for cache manager reports.
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (!vary_headers.isEmpty())
        mb->appendf("\tvary_headers: " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(vary_headers));
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    // negative indexes mean the object is absent from the corresponding cache
    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
214
215 int64_t
216 MemObject::endOffset () const
217 {
218 return data_hdr.endOffset();
219 }
220
221 void
222 MemObject::markEndOfReplyHeaders()
223 {
224 const int hdr_sz = endOffset();
225 assert(hdr_sz >= 0);
226 assert(reply_);
227 reply_->hdr_sz = hdr_sz;
228 }
229
230 int64_t
231 MemObject::size() const
232 {
233 if (object_sz < 0)
234 return endOffset();
235
236 return object_sz;
237 }
238
239 int64_t
240 MemObject::expectedReplySize() const
241 {
242 if (object_sz >= 0) {
243 debugs(20, 7, object_sz << " frozen by complete()");
244 return object_sz;
245 }
246
247 const auto hdr_sz = baseReply().hdr_sz;
248
249 // Cannot predict future length using an empty/unset or HTTP/0 reply.
250 // For any HTTP/1 reply, hdr_sz is positive -- status-line cannot be empty.
251 if (hdr_sz <= 0)
252 return -1;
253
254 const auto clen = baseReply().bodySize(method);
255 if (clen < 0) {
256 debugs(20, 7, "unknown; hdr: " << hdr_sz);
257 return -1;
258 }
259
260 const auto messageSize = clen + hdr_sz;
261 debugs(20, 7, messageSize << " hdr: " << hdr_sz << " clen: " << clen);
262 return messageSize;
263 }
264
/// Drops all stored content and resets the reply to a pristine state,
/// preparing the object for reuse. Must not be called while swapping out.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
    assert(reply_);
    reply_->reset();
    // forget header updates; they applied to the discarded reply state only
    updatedReply_ = nullptr;
    appliedUpdates = false;
}
277
278 int64_t
279 MemObject::lowestMemReaderOffset() const
280 {
281 LowestMemReader lowest (endOffset() + 1);
282
283 for_each <LowestMemReader>(clients, lowest);
284
285 return lowest.current;
286 }
287
/* XXX: This is wrong. It breaks *badly* on range combining */
/// Whether the read-ahead policy permits fetching more response bytes now:
/// stop once stored body bytes get readAheadGap ahead of the slowest reader.
bool
MemObject::readAheadPolicyCanRead() const
{
    // compare body bytes (excluding headers) against reader body offsets
    const auto savedHttpHeaders = baseReply().hdr_sz;
    const bool canRead = endOffset() - savedHttpHeaders <
                         lowestMemReaderOffset() + Config.readAheadGap;

    if (!canRead) {
        debugs(19, 5, "no: " << endOffset() << '-' << savedHttpHeaders <<
               " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
    }

    return canRead;
}
303
304 void
305 MemObject::addClient(store_client *aClient)
306 {
307 ++nclients;
308 dlinkAdd(aClient, &aClient->node, &clients);
309 }
310
#if URL_CHECKSUM_DEBUG
/// Asserts that the checksum recorded by setUris() still matches the current
/// URL, catching in-memory corruption of the store ID.
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
319
320 /*
321 * How much of the object data is on the disk?
322 */
323 int64_t
324 MemObject::objectBytesOnDisk() const
325 {
326 /*
327 * NOTE: storeOffset() represents the disk file size,
328 * not the amount of object data on disk.
329 *
330 * If we don't have at least 'swap_hdr_sz' bytes
331 * then none of the object data is on disk.
332 *
333 * This should still be safe if swap_hdr_sz == 0,
334 * meaning we haven't even opened the swapout file
335 * yet.
336 */
337
338 if (swapout.sio.getRaw() == NULL)
339 return 0;
340
341 int64_t nwritten = swapout.sio->offset();
342
343 if (nwritten <= (int64_t)swap_hdr_sz)
344 return 0;
345
346 return (nwritten - swap_hdr_sz);
347 }
348
/// The lowest in-memory offset that trimming policy wants to preserve.
/// \param swap whether the caller is swapping the object out to disk
int64_t
MemObject::policyLowestOffsetToKeep(bool swap) const
{
    /*
     * Careful. lowest_offset can be greater than endOffset(), such
     * as in the case of a range request.
     */
    int64_t lowest_offset = lowestMemReaderOffset();

    // trim down to the slowest reader when readers are past the data end,
    // when the object exceeds the in-memory size limit, or when swapping out
    // an object that the memory cache is not configured to keep
    if (endOffset() < lowest_offset ||
            endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
            (swap && !Config.onoff.memory_cache_first))
        return lowest_offset;

    return inmem_lo;
}
365
366 void
367 MemObject::trimSwappable()
368 {
369 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
370 /*
371 * We should only free up to what we know has been written
372 * to disk, not what has been queued for writing. Otherwise
373 * there will be a chunk of the data which is not in memory
374 * and is not yet on disk.
375 * The -1 makes sure the page isn't freed until storeSwapOut has
376 * walked to the next page.
377 */
378 int64_t on_disk;
379
380 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
381 new_mem_lo = on_disk - 1;
382
383 if (new_mem_lo == -1)
384 new_mem_lo = 0; /* the above might become -1 */
385
386 data_hdr.freeDataUpto(new_mem_lo);
387
388 inmem_lo = new_mem_lo;
389 }
390
/// Frees in-memory content that no reader needs, for an object that is not
/// being swapped out to disk.
void
MemObject::trimUnSwappable()
{
    if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
        assert (new_mem_lo > 0);
        data_hdr.freeDataUpto(new_mem_lo);
        inmem_lo = new_mem_lo;
    } // else we should not trim anything at this time
}
400
401 bool
402 MemObject::isContiguous() const
403 {
404 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
405 /* XXX : make this higher level */
406 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
407 return result;
408 }
409
/// \param max the caller-imposed ceiling on the number of bytes
/// \param ignoreDelayPools whether to bypass delay pool accounting
/// \returns how many bytes the hungriest client may receive now (at most max)
int
MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
{
#if USE_DELAY_POOLS
    if (!ignoreDelayPools) {
        /* identify delay id with largest allowance */
        DelayId largestAllowance = mostBytesAllowed ();
        return largestAllowance.bytesWanted(0, max);
    }
#else
    (void)ignoreDelayPools;
#endif

    return max;
}
425
426 void
427 MemObject::setNoDelay(bool const newValue)
428 {
429 #if USE_DELAY_POOLS
430
431 for (dlink_node *node = clients.head; node; node = node->next) {
432 store_client *sc = (store_client *) node->data;
433 sc->delayId.setNoDelay(newValue);
434 }
435 #else
436 (void)newValue;
437 #endif
438 }
439
/// Schedules the given read call to run later. With delay pools enabled and
/// read-ahead policy permitting, the read is queued on the most permissive
/// delay pool; otherwise it joins this object's deferred-reads queue (to be
/// resumed by kickReads()).
void
MemObject::delayRead(const AsyncCall::Pointer &aRead)
{
#if USE_DELAY_POOLS
    if (readAheadPolicyCanRead()) {
        if (DelayId mostAllowedId = mostBytesAllowed()) {
            mostAllowedId.delayRead(aRead);
            return;
        }
    }
#endif
    deferredReads.delay(aRead);
}
453
/// Resumes all reads previously postponed via delayRead().
void
MemObject::kickReads()
{
    deferredReads.schedule();
}
459
#if USE_DELAY_POOLS
/// \returns the delay ID of the store client currently allowed to receive
/// the most bytes (a default-constructed DelayId when there are no clients)
DelayId
MemObject::mostBytesAllowed() const
{
    DelayId best;
    int bestAllowance = -1;

    for (auto *clientNode = clients.head; clientNode; clientNode = clientNode->next) {
        auto *client = static_cast<store_client *>(clientNode->data);
        const int allowance = client->delayId.bytesWanted(0, client->copyInto.length);

        if (allowance > bestAllowance) {
            bestAllowance = allowance;
            best = client->delayId;
        }
    }

    return best;
}

#endif
483
484 int64_t
485 MemObject::availableForSwapOut() const
486 {
487 return endOffset() - swapout.queue_offset;
488 }
489