// src/MemObject.cc (Squid); change in view: "Rename Packable::Printf as Packable::appendf"
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 19 Store Memory Primitives */
10
11 #include "squid.h"
12 #include "comm/Connection.h"
13 #include "Generic.h"
14 #include "globals.h"
15 #include "HttpReply.h"
16 #include "HttpRequest.h"
17 #include "MemBuf.h"
18 #include "MemObject.h"
19 #include "profiler/Profiler.h"
20 #include "SquidConfig.h"
21 #include "Store.h"
22 #include "StoreClient.h"
23
24 #if USE_DELAY_POOLS
25 #include "DelayPools.h"
26 #endif
27
28 /* TODO: make this global or private */
#if URL_CHECKSUM_DEBUG
static unsigned int url_checksum(const char *url);

/// Computes a 32-bit debugging checksum of a URL by folding the first
/// sizeof(unsigned int) bytes of its MD5 digest into an integer.
unsigned int
url_checksum(const char *url)
{
    SquidMD5_CTX md5State;
    static unsigned char digest[16];

    SquidMD5Init(&md5State);
    SquidMD5Update(&md5State, (unsigned char *) url, strlen(url));
    SquidMD5Final(digest, &md5State);

    unsigned int checksum;
    memcpy(&checksum, digest, sizeof(checksum));
    return checksum;
}

#endif
45
/// the global memory-cache removal policy object; set up during configuration
RemovalPolicy * mem_policy = NULL;
47
/// \returns the number of MemObject instances currently allocated from the pool
size_t
MemObject::inUseCount()
{
    return Pool().inUseCount();
}
53
54 const char *
55 MemObject::storeId() const
56 {
57 if (!storeId_.size()) {
58 debugs(20, DBG_IMPORTANT, "Bug: Missing MemObject::storeId value");
59 dump();
60 storeId_ = "[unknown_URI]";
61 }
62 return storeId_.termedBuf();
63 }
64
65 const char *
66 MemObject::logUri() const
67 {
68 return logUri_.size() ? logUri_.termedBuf() : storeId();
69 }
70
71 bool
72 MemObject::hasUris() const
73 {
74 return storeId_.size();
75 }
76
77 void
78 MemObject::setUris(char const *aStoreId, char const *aLogUri, const HttpRequestMethod &aMethod)
79 {
80 storeId_ = aStoreId;
81
82 // fast pointer comparison for a common storeCreateEntry(url,url,...) case
83 if (!aLogUri || aLogUri == aStoreId)
84 logUri_.clean(); // use storeId_ by default to minimize copying
85 else
86 logUri_ = aLogUri;
87
88 method = aMethod;
89
90 #if URL_CHECKSUM_DEBUG
91 chksum = url_checksum(urlXXX());
92 #endif
93 }
94
/// Constructs an empty MemObject with a fresh (locked) reply, an unknown
/// object size, and the swapout decision still pending.
MemObject::MemObject(): smpCollapsed(false)
{
    debugs(20, 3, HERE << "new MemObject " << this);
    _reply = new HttpReply;
    HTTPMSGLOCK(_reply); // we hold a reference for the object's lifetime

    object_sz = -1; // -1 means "size not yet known"

    /* XXX account log_url */

    swapout.decision = SwapOut::swNeedsCheck;
}
107
/// Releases all resources: in-memory content, reply/request references,
/// and the vary headers buffer. Teardown order matters; see inline notes.
MemObject::~MemObject()
{
    debugs(20, 3, HERE << "del MemObject " << this);
    // enter a debug context named after the URL (when known) so messages
    // emitted during destruction are attributed to this object
    const Ctx ctx = ctx_enter(hasUris() ? urlXXX() : "[unknown_ctx]");

#if URL_CHECKSUM_DEBUG
    checkUrlChecksum();
#endif

    if (!shutting_down) { // Store::Root() is FATALly missing during shutdown
        // by now the object must be detached from transient/mem-cache tables
        // and have no swapout I/O in flight
        assert(xitTable.index < 0);
        assert(memCache.index < 0);
        assert(swapout.sio == NULL);
    }

    data_hdr.freeContent();

#if 0
    /*
     * There is no way to abort FD-less clients, so they might
     * still have mem->clients set.
     */
    assert(clients.head == NULL);

#endif

    HTTPMSGUNLOCK(_reply);

    HTTPMSGUNLOCK(request);

    ctx_exit(ctx);              /* must exit before we free mem->url */

    safe_free(vary_headers);
}
142
/// Drops this object's reference to the originating HTTP request (if any).
void
MemObject::unlinkRequest()
{
    HTTPMSGUNLOCK(request);
}
148
149 void
150 MemObject::write(const StoreIOBuffer &writeBuffer)
151 {
152 PROF_start(MemObject_write);
153 debugs(19, 6, "memWrite: offset " << writeBuffer.offset << " len " << writeBuffer.length);
154
155 /* We don't separate out mime headers yet, so ensure that the first
156 * write is at offset 0 - where they start
157 */
158 assert (data_hdr.endOffset() || writeBuffer.offset == 0);
159
160 assert (data_hdr.write (writeBuffer));
161 PROF_stop(MemObject_write);
162 }
163
/// Logs this object's key fields at DBG_IMPORTANT level for debugging.
void
MemObject::dump() const
{
    data_hdr.dump();
#if 0
    /* do we want this one? */
    debugs(20, DBG_IMPORTANT, "MemObject->data.origin_offset: " << (data_hdr.head ? data_hdr.head->nodeBuffer.offset : 0));
#endif

    // start_ping printed as seconds.microseconds, zero-padded to 6 digits
    debugs(20, DBG_IMPORTANT, "MemObject->start_ping: " << start_ping.tv_sec << "."<< std::setfill('0') << std::setw(6) << start_ping.tv_usec);
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_hi: " << data_hdr.endOffset());
    debugs(20, DBG_IMPORTANT, "MemObject->inmem_lo: " << inmem_lo);
    debugs(20, DBG_IMPORTANT, "MemObject->nclients: " << nclients);
    debugs(20, DBG_IMPORTANT, "MemObject->reply: " << _reply);
    debugs(20, DBG_IMPORTANT, "MemObject->request: " << request);
    debugs(20, DBG_IMPORTANT, "MemObject->logUri: " << logUri_);
    debugs(20, DBG_IMPORTANT, "MemObject->storeId: " << storeId_);
}
182
/// \returns the (never nil after construction) HTTP reply for this object
HttpReply const *
MemObject::getReply() const
{
    return _reply;
}
188
/// Replaces the stored HTTP reply with newrep, adjusting reference counts.
void
MemObject::replaceHttpReply(HttpReply *newrep)
{
    HTTPMSGUNLOCK(_reply); // release our reference to the old reply
    _reply = newrep;
    HTTPMSGLOCK(_reply);   // and take one on the replacement
}
196
/// for_each functor: tracks the lowest copyInto offset among store clients
/// still reading from memory; `current` starts at the supplied seed.
struct LowestMemReader : public unary_function<store_client, void> {
    LowestMemReader(int64_t seed):current(seed) {}

    void operator() (store_client const &x) {
        if (x.memReaderHasLowerOffset(current))
            current = x.copyInto.offset;
    }

    int64_t current; ///< lowest memory-reader offset seen so far
};
207
/// for_each functor: appends per-client statistics to a MemBuf,
/// numbering clients in visitation order.
struct StoreClientStats : public unary_function<store_client, void> {
    StoreClientStats(MemBuf *anEntry):where(anEntry),index(0) {}

    void operator()(store_client const &x) {
        x.dumpStats(where, index);
        ++index;
    }

    MemBuf *where; ///< output buffer the stats are appended to
    size_t index;  ///< ordinal of the next client to be dumped
};
219
/// Appends a human-readable description of this object (method/URI, memory
/// offsets, swapout/transient/mem-cache state, and per-client stats) to mb.
void
MemObject::stat(MemBuf * mb) const
{
    mb->appendf("\t" SQUIDSBUFPH " %s\n", SQUIDSBUFPRINT(method.image()), logUri());
    if (vary_headers)
        mb->appendf("\tvary_headers: %s\n", vary_headers);
    mb->appendf("\tinmem_lo: %" PRId64 "\n", inmem_lo);
    mb->appendf("\tinmem_hi: %" PRId64 "\n", data_hdr.endOffset());
    mb->appendf("\tswapout: %" PRId64 " bytes queued\n", swapout.queue_offset);

    if (swapout.sio.getRaw())
        mb->appendf("\tswapout: %" PRId64 " bytes written\n", (int64_t) swapout.sio->offset());

    if (xitTable.index >= 0)
        mb->appendf("\ttransient index: %d state: %d\n", xitTable.index, xitTable.io);
    if (memCache.index >= 0)
        mb->appendf("\tmem-cache index: %d state: %d offset: %" PRId64 "\n", memCache.index, memCache.io, memCache.offset);
    if (object_sz >= 0)
        mb->appendf("\tobject_sz: %" PRId64 "\n", object_sz);
    if (smpCollapsed)
        mb->appendf("\tsmp-collapsed\n");

    StoreClientStats statsVisitor(mb);

    for_each<StoreClientStats>(clients, statsVisitor);
}
246
/// \returns the offset just past the last byte currently held in memory
int64_t
MemObject::endOffset () const
{
    return data_hdr.endOffset();
}
252
/// Records the current in-memory data size as the reply header size.
/// Callers must ensure all reply headers (and nothing else) are in data_hdr.
void
MemObject::markEndOfReplyHeaders()
{
    // NOTE(review): endOffset() is int64_t but hdr_sz is int; presumably
    // header sizes always fit -- confirm there is no truncation risk.
    const int hdr_sz = endOffset();
    assert(hdr_sz >= 0);
    assert(_reply);
    _reply->hdr_sz = hdr_sz;
}
261
262 int64_t
263 MemObject::size() const
264 {
265 if (object_sz < 0)
266 return endOffset();
267
268 return object_sz;
269 }
270
271 int64_t
272 MemObject::expectedReplySize() const
273 {
274 debugs(20, 7, HERE << "object_sz: " << object_sz);
275 if (object_sz >= 0) // complete() has been called; we know the exact answer
276 return object_sz;
277
278 if (_reply) {
279 const int64_t clen = _reply->bodySize(method);
280 debugs(20, 7, HERE << "clen: " << clen);
281 if (clen >= 0 && _reply->hdr_sz > 0) // yuck: HttpMsg sets hdr_sz to 0
282 return clen + _reply->hdr_sz;
283 }
284
285 return -1; // not enough information to predict
286 }
287
/// Discards all in-memory content and rewinds inmem_lo to the beginning.
/// Only valid while no swapout I/O is in progress.
void
MemObject::reset()
{
    assert(swapout.sio == NULL);
    data_hdr.freeContent();
    inmem_lo = 0;
    /* Should we check for clients? */
}
296
297 int64_t
298 MemObject::lowestMemReaderOffset() const
299 {
300 LowestMemReader lowest (endOffset() + 1);
301
302 for_each <LowestMemReader>(clients, lowest);
303
304 return lowest.current;
305 }
306
307 /* XXX: This is wrong. It breaks *badly* on range combining */
308 bool
309 MemObject::readAheadPolicyCanRead() const
310 {
311 const bool canRead = endOffset() - getReply()->hdr_sz <
312 lowestMemReaderOffset() + Config.readAheadGap;
313
314 if (!canRead) {
315 debugs(19, 9, "no: " << endOffset() << '-' << getReply()->hdr_sz <<
316 " < " << lowestMemReaderOffset() << '+' << Config.readAheadGap);
317 }
318
319 return canRead;
320 }
321
/// Registers a new store client with this object so that it is
/// visited by client-iterating operations (stats, trimming, etc.).
void
MemObject::addClient(store_client *aClient)
{
    ++nclients;
    dlinkAdd(aClient, &aClient->node, &clients);
}
328
#if URL_CHECKSUM_DEBUG
/// Debug-only integrity check: verifies that the URL has not been
/// corrupted since the checksum was recorded in setUris().
void
MemObject::checkUrlChecksum () const
{
    assert(chksum == url_checksum(urlXXX()));
}

#endif
337
338 /*
339 * How much of the object data is on the disk?
340 */
341 int64_t
342 MemObject::objectBytesOnDisk() const
343 {
344 /*
345 * NOTE: storeOffset() represents the disk file size,
346 * not the amount of object data on disk.
347 *
348 * If we don't have at least 'swap_hdr_sz' bytes
349 * then none of the object data is on disk.
350 *
351 * This should still be safe if swap_hdr_sz == 0,
352 * meaning we haven't even opened the swapout file
353 * yet.
354 */
355
356 if (swapout.sio.getRaw() == NULL)
357 return 0;
358
359 int64_t nwritten = swapout.sio->offset();
360
361 if (nwritten <= (int64_t)swap_hdr_sz)
362 return 0;
363
364 return (nwritten - swap_hdr_sz);
365 }
366
367 int64_t
368 MemObject::policyLowestOffsetToKeep(bool swap) const
369 {
370 /*
371 * Careful. lowest_offset can be greater than endOffset(), such
372 * as in the case of a range request.
373 */
374 int64_t lowest_offset = lowestMemReaderOffset();
375
376 if (endOffset() < lowest_offset ||
377 endOffset() - inmem_lo > (int64_t)Config.Store.maxInMemObjSize ||
378 (swap && !Config.onoff.memory_cache_first))
379 return lowest_offset;
380
381 return inmem_lo;
382 }
383
384 void
385 MemObject::trimSwappable()
386 {
387 int64_t new_mem_lo = policyLowestOffsetToKeep(1);
388 /*
389 * We should only free up to what we know has been written
390 * to disk, not what has been queued for writing. Otherwise
391 * there will be a chunk of the data which is not in memory
392 * and is not yet on disk.
393 * The -1 makes sure the page isn't freed until storeSwapOut has
394 * walked to the next page.
395 */
396 int64_t on_disk;
397
398 if ((on_disk = objectBytesOnDisk()) - 1 < new_mem_lo)
399 new_mem_lo = on_disk - 1;
400
401 if (new_mem_lo == -1)
402 new_mem_lo = 0; /* the above might become -1 */
403
404 data_hdr.freeDataUpto(new_mem_lo);
405
406 inmem_lo = new_mem_lo;
407 }
408
409 void
410 MemObject::trimUnSwappable()
411 {
412 if (const int64_t new_mem_lo = policyLowestOffsetToKeep(false)) {
413 assert (new_mem_lo > 0);
414 data_hdr.freeDataUpto(new_mem_lo);
415 inmem_lo = new_mem_lo;
416 } // else we should not trim anything at this time
417 }
418
419 bool
420 MemObject::isContiguous() const
421 {
422 bool result = data_hdr.hasContigousContentRange (Range<int64_t>(inmem_lo, endOffset()));
423 /* XXX : make this higher level */
424 debugs (19, result ? 4 :3, "MemObject::isContiguous: Returning " << (result ? "true" : "false"));
425 return result;
426 }
427
428 int
429 MemObject::mostBytesWanted(int max, bool ignoreDelayPools) const
430 {
431 #if USE_DELAY_POOLS
432 if (!ignoreDelayPools) {
433 /* identify delay id with largest allowance */
434 DelayId largestAllowance = mostBytesAllowed ();
435 return largestAllowance.bytesWanted(0, max);
436 }
437 #endif
438
439 return max;
440 }
441
442 void
443 MemObject::setNoDelay(bool const newValue)
444 {
445 #if USE_DELAY_POOLS
446
447 for (dlink_node *node = clients.head; node; node = node->next) {
448 store_client *sc = (store_client *) node->data;
449 sc->delayId.setNoDelay(newValue);
450 }
451
452 #endif
453 }
454
/// Queues aRead to be retried later (e.g., when more data or allowance arrives).
void
MemObject::delayRead(DeferredRead const &aRead)
{
    deferredReads.delayRead(aRead);
}
460
/// Schedules all deferred reads to be retried (-1 means "no limit").
void
MemObject::kickReads()
{
    deferredReads.kickReads(-1);
}
466
467 #if USE_DELAY_POOLS
468 DelayId
469 MemObject::mostBytesAllowed() const
470 {
471 int j;
472 int jmax = -1;
473 DelayId result;
474
475 for (dlink_node *node = clients.head; node; node = node->next) {
476 store_client *sc = (store_client *) node->data;
477 #if 0
478 /* This test is invalid because the client may be writing data
479 * and thus will want data immediately.
480 * If we include the test, there is a race condition when too much
481 * data is read - if all sc's are writing when a read is scheduled.
482 * XXX: fixme.
483 */
484
485 if (!sc->callbackPending())
486 /* not waiting for more data */
487 continue;
488
489 #endif
490
491 j = sc->delayId.bytesWanted(0, sc->copyInto.length);
492
493 if (j > jmax) {
494 jmax = j;
495 result = sc->delayId;
496 }
497 }
498
499 return result;
500 }
501
502 #endif
503
/// \returns the number of bytes received but not yet queued for swapout
int64_t
MemObject::availableForSwapOut() const
{
    return endOffset() - swapout.queue_offset;
}
509