1 /*
2 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 90 Storage Manager Client-Side Interface */
10
11 #include "squid.h"
12 #include "acl/FilledChecklist.h"
13 #include "event.h"
14 #include "globals.h"
15 #include "HttpReply.h"
16 #include "HttpRequest.h"
17 #include "MemBuf.h"
18 #include "MemObject.h"
19 #include "mime_header.h"
20 #include "profiler/Profiler.h"
21 #include "SquidConfig.h"
22 #include "StatCounters.h"
23 #include "Store.h"
24 #include "store_swapin.h"
25 #include "StoreClient.h"
26 #include "StoreMeta.h"
27 #include "StoreMetaUnpacker.h"
28 #if USE_DELAY_POOLS
29 #include "DelayPools.h"
30 #endif
31
32 /*
33 * NOTE: 'Header' refers to the swapfile metadata header.
34 * 'OBJHeader' refers to the object header, with canonical
35 * processed object headers (which may derive from FTP/HTTP etc.
36 * upstream protocols).
37 * 'Body' refers to the swapfile body, which is the full
38 * HTTP reply (including HTTP headers and body).
39 */
40 static StoreIOState::STRCB storeClientReadBody;
41 static StoreIOState::STRCB storeClientReadHeader;
42 static void storeClientCopy2(StoreEntry * e, store_client * sc);
43 static EVH storeClientCopyEvent;
44 static bool CheckQuickAbortIsReasonable(StoreEntry * entry);
45
46 CBDATA_CLASS_INIT(store_client);
47
48 /* StoreClient */
49
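/// Whether Squid configuration allows this client to collapse: requires
/// collapsed_forwarding to be enabled and, if a collapsedForwardingAccess
/// ACL is configured, a matching fast check.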
50 bool
51 StoreClient::onCollapsingPath() const
52 {
53 if (!Config.onoff.collapsed_forwarding)
54 return false;
55
56 if (!Config.accessList.collapsedForwardingAccess)
57 return true;
58
59 ACLFilledChecklist checklist(Config.accessList.collapsedForwardingAccess, nullptr, nullptr);
60 fillChecklist(checklist);
61 return checklist.fastCheck().allowed();
62 }
63
64 bool
65 StoreClient::startCollapsingOn(const StoreEntry &e, const bool doingRevalidation)
66 {
67 if (!e.hittingRequiresCollapsing())
68 return false; // collapsing is impossible due to the entry state
69
70 if (!onCollapsingPath())
71 return false; // collapsing is impossible due to Squid configuration
72
73 /* collapsing is possible; the caller must collapse */
74
75 if (const auto tags = loggingTags()) {
76 if (doingRevalidation)
77 tags->collapsingHistory.revalidationCollapses++;
78 else
79 tags->collapsingHistory.otherCollapses++;
80 }
81
82 debugs(85, 5, e << " doingRevalidation=" << doingRevalidation);
83 return true;
84 }
85
86 void
87 StoreClient::fillChecklist(ACLFilledChecklist &checklist) const
88 {
89 // TODO: Consider moving all CF-related methods into a new dedicated class.
90 Must(!"startCollapsingOn() caller must override fillChecklist()");
91 }
92
93 /* store_client */
94
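/// whether this is an in-memory reader whose current copy offset is still
/// below the given offset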
95 bool
96 store_client::memReaderHasLowerOffset(int64_t anOffset) const
97 {
98 return getType() == STORE_MEM_CLIENT && copyInto.offset < anOffset;
99 }
100
101 int
102 store_client::getType() const
103 {
104 return type;
105 }
106
107 #if STORE_CLIENT_LIST_DEBUG
108 static store_client *
109 storeClientListSearch(const MemObject * mem, void *data)
110 {
111 dlink_node *node;
112 store_client *sc = NULL;
113
114 for (node = mem->clients.head; node; node = node->next) {
115 sc = node->data;
116
117 if (sc->owner == data)
118 return sc;
119 }
120
121 return NULL;
122 }
123
124 int
125 storeClientIsThisAClient(store_client * sc, void *someClient)
126 {
127 return sc->owner == someClient;
128 }
129
130 #endif
131 #include "HttpRequest.h"
132
133 /* add a client to the entry's client list */
134 store_client *
135 storeClientListAdd(StoreEntry * e, void *data)
136 {
137 MemObject *mem = e->mem_obj;
138 store_client *sc;
139 assert(mem);
140 #if STORE_CLIENT_LIST_DEBUG
141
142 if (storeClientListSearch(mem, data) != NULL)
143 /* XXX die! */
144 assert(1 == 0);
145
146 #endif
147
148 sc = new store_client (e);
149
150 mem->addClient(sc);
151
152 return sc;
153 }
154
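/// Finishes the current copy() request: packages the copied bytes (or an
/// error) into a StoreIOBuffer, clears the saved Callback, and notifies the
/// client's STCB handler if its cbdata is still valid.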
155 void
156 store_client::callback(ssize_t sz, bool error)
157 {
158 size_t bSz = 0;
159
160 if (sz >= 0 && !error)
161 bSz = sz;
162
163     StoreIOBuffer result(bSz, 0, copyInto.data);
164
165 if (sz < 0 || error)
166 result.flags.error = 1;
167
168 result.offset = cmp_offset;
169 assert(_callback.pending());
170 cmp_offset = copyInto.offset + bSz;
171 STCB *temphandler = _callback.callback_handler;
172 void *cbdata = _callback.callback_data;
173 _callback = Callback(NULL, NULL);
174 copyInto.data = NULL;
175
176 if (cbdataReferenceValid(cbdata))
177 temphandler(cbdata, result);
178
179 cbdataReferenceDone(cbdata);
180 }
181
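/// deferred retry of storeClientCopy2(), scheduled when a copy request
/// arrived while this client was already busy copying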
182 static void
183 storeClientCopyEvent(void *data)
184 {
185 store_client *sc = (store_client *)data;
186 debugs(90, 3, "storeClientCopyEvent: Running");
187 assert (sc->flags.copy_event_pending);
188 sc->flags.copy_event_pending = false;
189
190 if (!sc->_callback.pending())
191 return;
192
193 storeClientCopy2(sc->entry, sc);
194 }
195
196 store_client::store_client(StoreEntry *e) :
197 cmp_offset(0),
198 #if STORE_CLIENT_LIST_DEBUG
199 owner(cbdataReference(data)),
200 #endif
201 entry(e),
202 type(e->storeClientType()),
203 object_ok(true)
204 {
205 flags.disk_io_pending = false;
206 flags.store_copying = false;
207 flags.copy_event_pending = false;
208 ++ entry->refcount;
209
210 if (getType() == STORE_DISK_CLIENT) {
211 /* assert we'll be able to get the data we want */
212 /* maybe we should open swapin_sio here */
213 assert(entry->hasDisk() || entry->swappingOut());
214 }
215 }
216
217 store_client::~store_client()
218 {}
219
220 /* copy bytes requested by the client */
221 void
222 storeClientCopy(store_client * sc,
223 StoreEntry * e,
224 StoreIOBuffer copyInto,
225 STCB * callback,
226 void *data)
227 {
228 assert (sc != NULL);
229     sc->copy(e, copyInto, callback, data);
230 }
231
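/*
 * Illustrative caller sketch (not part of this file; the buffer, handler,
 * and cbdata names below are hypothetical):
 *
 *     static STCB ourHandler;  // void ourHandler(void *data, StoreIOBuffer result)
 *     char localBuf[4096];
 *     StoreIOBuffer tempBuffer(sizeof(localBuf), readOffset, localBuf);
 *     storeClientCopy(sc, entry, tempBuffer, ourHandler, cbdataPtr);
 *
 * The handler runs at most once per copy() request, receiving either the
 * copied bytes or a result with flags.error set.
 */
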
232 void
233 store_client::copy(StoreEntry * anEntry,
234 StoreIOBuffer copyRequest,
235 STCB * callback_fn,
236 void *data)
237 {
238 assert (anEntry == entry);
239 assert (callback_fn);
240 assert (data);
241 assert(!EBIT_TEST(entry->flags, ENTRY_ABORTED));
242 debugs(90, 3, "store_client::copy: " << entry->getMD5Text() << ", from " <<
243 copyRequest.offset << ", for length " <<
244 (int) copyRequest.length << ", cb " << callback_fn << ", cbdata " <<
245 data);
246
247 #if STORE_CLIENT_LIST_DEBUG
248
249 assert(this == storeClientListSearch(entry->mem_obj, data));
250 #endif
251
252 assert(!_callback.pending());
253 #if ONLYCONTIGUOUSREQUESTS
254
255 assert(cmp_offset == copyRequest.offset);
256 #endif
257 /* range requests will skip into the body */
258 cmp_offset = copyRequest.offset;
259 _callback = Callback (callback_fn, cbdataReference(data));
260 copyInto.data = copyRequest.data;
261 copyInto.length = copyRequest.length;
262 copyInto.offset = copyRequest.offset;
263
264 static bool copying (false);
265 assert (!copying);
266 copying = true;
267 PROF_start(storeClient_kickReads);
268     /* We might be blocking comm reads due to readahead limits;
269      * now that we have a new offset, trigger those reads...
270 */
271 entry->mem_obj->kickReads();
272 PROF_stop(storeClient_kickReads);
273 copying = false;
274
275 anEntry->lock("store_client::copy"); // see deletion note below
276
277 storeClientCopy2(entry, this);
278
279 // Bug 3480: This store_client object may be deleted now if, for example,
280 // the client rejects the hit response copied above. Use on-stack pointers!
281
282 #if USE_ADAPTATION
283 anEntry->kickProducer();
284 #endif
285 anEntry->unlock("store_client::copy");
286
287 // Add no code here. This object may no longer exist.
288 }
289
290 /// Whether there is (or will be) more entry data for us.
291 bool
292 store_client::moreToSend() const
293 {
294 if (entry->store_status == STORE_PENDING)
295 return true; // there may be more coming
296
297 /* STORE_OK, including aborted entries: no more data is coming */
298
299 const int64_t len = entry->objectLen();
300
301 // If we do not know the entry length, then we have to open the swap file.
302 const bool canSwapIn = entry->hasDisk();
303 if (len < 0)
304 return canSwapIn;
305
306 if (copyInto.offset >= len)
307 return false; // sent everything there is
308
309 if (canSwapIn)
310 return true; // if we lack prefix, we can swap it in
311
312 // If we cannot swap in, make sure we have what we want in RAM. Otherwise,
313 // scheduleRead calls scheduleDiskRead which asserts without a swap file.
314 const MemObject *mem = entry->mem_obj;
315 return mem &&
316 mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
317 }
318
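/// Attempts to deliver the pending copy() answer now. Defers the work (via a
/// storeClientCopyEvent) if this client is already in the middle of copying.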
319 static void
320 storeClientCopy2(StoreEntry * e, store_client * sc)
321 {
322 /* reentrancy not allowed - note this could lead to
323 * dropped events
324 */
325
326 if (sc->flags.copy_event_pending) {
327 return;
328 }
329
330 if (sc->flags.store_copying) {
331 sc->flags.copy_event_pending = true;
332 debugs(90, 3, "storeClientCopy2: Queueing storeClientCopyEvent()");
333 eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
334 return;
335 }
336
337 debugs(90, 3, "storeClientCopy2: " << e->getMD5Text());
338 assert(sc->_callback.pending());
339 /*
340 * We used to check for ENTRY_ABORTED here. But there were some
341 * problems. For example, we might have a slow client (or two) and
342 * the peer server is reading far ahead and swapping to disk. Even
343 * if the peer aborts, we want to give the client(s)
344 * everything we got before the abort condition occurred.
345 */
346 /* Warning: doCopy may indirectly free itself in callbacks,
347 * hence the lock to keep it active for the duration of
348 * this function
349 * XXX: Locking does not prevent calling sc destructor (it only prevents
350 * freeing sc memory) so sc may become invalid from C++ p.o.v.
351 */
352 CbcPointer<store_client> tmpLock = sc;
353 assert (!sc->flags.store_copying);
354 sc->doCopy(e);
355 assert(!sc->flags.store_copying);
356 }
357
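/// A single delivery attempt: answers from memory when the requested bytes
/// are available there, otherwise schedules a disk read (opening the swapin
/// file first when needed), or simply waits for a STORE_PENDING entry to
/// produce more data.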
358 void
359 store_client::doCopy(StoreEntry *anEntry)
360 {
361 assert (anEntry == entry);
362 flags.store_copying = true;
363 MemObject *mem = entry->mem_obj;
364
365 debugs(33, 5, "store_client::doCopy: co: " <<
366 copyInto.offset << ", hi: " <<
367 mem->endOffset());
368
369 if (!moreToSend()) {
370 /* There is no more to send! */
371 debugs(33, 3, HERE << "There is no more to send!");
372 callback(0);
373 flags.store_copying = false;
374 return;
375 }
376
377 /* Check that we actually have data */
378 if (anEntry->store_status == STORE_PENDING && copyInto.offset >= mem->endOffset()) {
379 debugs(90, 3, "store_client::doCopy: Waiting for more");
380 flags.store_copying = false;
381 return;
382 }
383
384 /*
385 * Slight weirdness here. We open a swapin file for any
386 * STORE_DISK_CLIENT, even if we can copy the requested chunk
387 * from memory in the next block. We must try to open the
388 * swapin file before sending any data to the client side. If
389 * we postpone the open, and then can not open the file later
390 * on, the client loses big time. Its transfer just gets cut
391 * off. Better to open it early (while the client side handler
392 * is clientCacheHit) so that we can fall back to a cache miss
393 * if needed.
394 */
395
396 if (STORE_DISK_CLIENT == getType() && swapin_sio == NULL) {
397 if (!startSwapin())
398 return; // failure
399 }
400 scheduleRead();
401 }
402
403 /// opens the swapin "file" if possible; otherwise, fail()s and returns false
404 bool
405 store_client::startSwapin()
406 {
407 debugs(90, 3, "store_client::doCopy: Need to open swap in file");
408 /* gotta open the swapin file */
409
410 if (storeTooManyDiskFilesOpen()) {
411 /* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
412 fail();
413 flags.store_copying = false;
414 return false;
415 } else if (!flags.disk_io_pending) {
416         /* Don't set disk_io_pending here */
417 storeSwapInStart(this);
418
419 if (swapin_sio == NULL) {
420 fail();
421 flags.store_copying = false;
422 return false;
423 }
424
425 return true;
426 } else {
427 debugs(90, DBG_IMPORTANT, "WARNING: Averted multiple fd operation (1)");
428 flags.store_copying = false;
429 return false;
430 }
431 }
432
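/// schedules a memory or disk read for the requested bytes, depending on
/// whether copyInto.offset currently falls inside the memory object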
433 void
434 store_client::scheduleRead()
435 {
436 MemObject *mem = entry->mem_obj;
437
438 if (copyInto.offset >= mem->inmem_lo && copyInto.offset < mem->endOffset())
439 scheduleMemRead();
440 else
441 scheduleDiskRead();
442 }
443
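/// schedules a disk read for bytes that are not in memory, opening the
/// swapin file first if this client has not opened it yet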
444 void
445 store_client::scheduleDiskRead()
446 {
447 /* What the client wants is not in memory. Schedule a disk read */
448 if (getType() == STORE_DISK_CLIENT) {
449 // we should have called startSwapin() already
450 assert(swapin_sio != NULL);
451 } else if (!swapin_sio && !startSwapin()) {
452 debugs(90, 3, "bailing after swapin start failure for " << *entry);
453 assert(!flags.store_copying);
454 return;
455 }
456
457 assert(!flags.disk_io_pending);
458
459 debugs(90, 3, "reading " << *entry << " from disk");
460
461 fileRead();
462
463 flags.store_copying = false;
464 }
465
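/// satisfies the pending copy() request from the memory object's data_hdr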
466 void
467 store_client::scheduleMemRead()
468 {
469 /* What the client wants is in memory */
470 /* Old style */
471 debugs(90, 3, "store_client::doCopy: Copying normal from memory");
472 size_t sz = entry->mem_obj->data_hdr.copy(copyInto);
473 callback(sz);
474 flags.store_copying = false;
475 }
476
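/// issues the actual storeRead() against the swapin file; the first read
/// (while swap_hdr_sz is still unknown) is answered by storeClientReadHeader,
/// subsequent reads by storeClientReadBody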
477 void
478 store_client::fileRead()
479 {
480 MemObject *mem = entry->mem_obj;
481
482 assert(_callback.pending());
483 assert(!flags.disk_io_pending);
484 flags.disk_io_pending = true;
485
486 if (mem->swap_hdr_sz != 0)
487 if (entry->swappingOut())
488 assert(mem->swapout.sio->offset() > copyInto.offset + (int64_t)mem->swap_hdr_sz);
489
490 storeRead(swapin_sio,
491 copyInto.data,
492 copyInto.length,
493 copyInto.offset + mem->swap_hdr_sz,
494 mem->swap_hdr_sz == 0 ? storeClientReadHeader
495 : storeClientReadBody,
496 this);
497 }
498
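/// Handles body bytes read from the swapin file: parses the stored HTTP
/// reply headers when reading from offset zero, may copy the data back into
/// the memory cache, and completes the pending copy() request.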
499 void
500 store_client::readBody(const char *, ssize_t len)
501 {
502 int parsed_header = 0;
503
504     // Don't assert disk_io_pending here; we may be called by readHeader()
505 flags.disk_io_pending = false;
506 assert(_callback.pending());
507 debugs(90, 3, "storeClientReadBody: len " << len << "");
508
509 if (copyInto.offset == 0 && len > 0 && entry->getReply()->sline.status() == Http::scNone) {
510 /* Our structure ! */
511 HttpReply *rep = (HttpReply *) entry->getReply(); // bypass const
512
513 if (!rep->parseCharBuf(copyInto.data, headersEnd(copyInto.data, len))) {
514 debugs(90, DBG_CRITICAL, "Could not parse headers from on disk object");
515 } else {
516 parsed_header = 1;
517 }
518 }
519
520 const HttpReply *rep = entry->getReply();
521 if (len > 0 && rep && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
522 storeGetMemSpace(len);
523 // The above may start to free our object so we need to check again
524 if (entry->mem_obj->inmem_lo == 0) {
525 /* Copy read data back into memory.
526 * copyInto.offset includes headers, which is what mem cache needs
527 */
528 int64_t mem_offset = entry->mem_obj->endOffset();
529 if ((copyInto.offset == mem_offset) || (parsed_header && mem_offset == rep->hdr_sz)) {
530 entry->mem_obj->write(StoreIOBuffer(len, copyInto.offset, copyInto.data));
531 }
532 }
533 }
534
535 callback(len);
536 }
537
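/// marks the on-disk object as unusable and, if a copy() request is still
/// pending, reports the error to the client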
538 void
539 store_client::fail()
540 {
541 object_ok = false;
542     /* Synchronous open failures call back from the store
543      * before startSwapin() detects the failure.
544      * TODO: fix this inconsistent behaviour, probably by
545      * making storeSwapInStart() use a callback instead of
546      * returning synchronously.
547 */
548
549 if (_callback.pending())
550 callback(0, true);
551 }
552
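/// StoreIOState::STRCB callback forwarding swap metadata reads to
/// store_client::readHeader()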
553 static void
554 storeClientReadHeader(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
555 {
556 store_client *sc = (store_client *)data;
557 sc->readHeader(buf, len);
558 }
559
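/// StoreIOState::STRCB callback forwarding body reads to
/// store_client::readBody()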
560 static void
561 storeClientReadBody(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
562 {
563 store_client *sc = (store_client *)data;
564 sc->readBody(buf, len);
565 }
566
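/// Unpacks and validates the swap metadata read from disk. On success,
/// records swap_hdr_sz (and object_sz, when swap_file_sz is already known)
/// in the memory object; returns false on any error.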
567 bool
568 store_client::unpackHeader(char const *buf, ssize_t len)
569 {
570 int xerrno = errno; // FIXME: where does errno come from?
571 debugs(90, 3, "store_client::unpackHeader: len " << len << "");
572
573 if (len < 0) {
574 debugs(90, 3, "WARNING: unpack error: " << xstrerr(xerrno));
575 return false;
576 }
577
578 int swap_hdr_sz = 0;
579 tlv *tlv_list = nullptr;
580 try {
581 StoreMetaUnpacker aBuilder(buf, len, &swap_hdr_sz);
582 tlv_list = aBuilder.createStoreMeta();
583 } catch (const std::exception &e) {
584 debugs(90, DBG_IMPORTANT, "WARNING: failed to unpack metadata because " << e.what());
585 return false;
586 }
587 assert(tlv_list);
588
589 /*
590 * Check the meta data and make sure we got the right object.
591 */
592 for (tlv *t = tlv_list; t; t = t->next) {
593 if (!t->checkConsistency(entry)) {
594 storeSwapTLVFree(tlv_list);
595 return false;
596 }
597 }
598
599 storeSwapTLVFree(tlv_list);
600
601 assert(swap_hdr_sz >= 0);
602 entry->mem_obj->swap_hdr_sz = swap_hdr_sz;
603 if (entry->swap_file_sz > 0) { // collapsed hits may not know swap_file_sz
604 assert(entry->swap_file_sz >= static_cast<uint64_t>(swap_hdr_sz));
605 entry->mem_obj->object_sz = entry->swap_file_sz - swap_hdr_sz;
606 }
607 debugs(90, 5, "store_client::unpackHeader: swap_file_sz=" <<
608 entry->swap_file_sz << "( " << swap_hdr_sz << " + " <<
609 entry->mem_obj->object_sz << ")");
610 return true;
611 }
612
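/// Handles the first disk read: unpacks the swap metadata header and then
/// either hands any body bytes received in the same read to readBody() or
/// schedules another read.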
613 void
614 store_client::readHeader(char const *buf, ssize_t len)
615 {
616 MemObject *const mem = entry->mem_obj;
617
618 assert(flags.disk_io_pending);
619 flags.disk_io_pending = false;
620 assert(_callback.pending());
621
622 // abort if we fail()'d earlier
623 if (!object_ok)
624 return;
625
626 if (!unpackHeader(buf, len)) {
627 fail();
628 return;
629 }
630
631 /*
632 * If our last read got some data the client wants, then give
633 * it to them, otherwise schedule another read.
634 */
635 size_t body_sz = len - mem->swap_hdr_sz;
636
637 if (copyInto.offset < static_cast<int64_t>(body_sz)) {
638 /*
639 * we have (part of) what they want
640 */
641 size_t copy_sz = min(copyInto.length, body_sz);
642 debugs(90, 3, "storeClientReadHeader: copying " << copy_sz << " bytes of body");
643 memmove(copyInto.data, copyInto.data + mem->swap_hdr_sz, copy_sz);
644
645 readBody(copyInto.data, copy_sz);
646
647 return;
648 }
649
650 /*
651 * we don't have what the client wants, but at least we now
652 * know the swap header size.
653 */
654 fileRead();
655 }
656
657 int
658 storeClientCopyPending(store_client * sc, StoreEntry * e, void *data)
659 {
660 #if STORE_CLIENT_LIST_DEBUG
661 assert(sc == storeClientListSearch(e->mem_obj, data));
662 #endif
663 #ifndef SILLY_CODE
664
665 assert(sc);
666 #endif
667
668 assert(sc->entry == e);
669 #if SILLY_CODE
670
671 if (sc == NULL)
672 return 0;
673
674 #endif
675
676 if (!sc->_callback.pending())
677 return 0;
678
679 return 1;
680 }
681
682 /*
683 * This routine hasn't been optimised to take advantage of the
684 * passed sc. Yet.
685 */
686 int
687 storeUnregister(store_client * sc, StoreEntry * e, void *data)
688 {
689 MemObject *mem = e->mem_obj;
690 #if STORE_CLIENT_LIST_DEBUG
691
692 assert(sc == storeClientListSearch(e->mem_obj, data));
693 #endif
694
695 if (mem == NULL)
696 return 0;
697
698 debugs(90, 3, "storeUnregister: called for '" << e->getMD5Text() << "'");
699
700 if (sc == NULL) {
701 debugs(90, 3, "storeUnregister: No matching client for '" << e->getMD5Text() << "'");
702 return 0;
703 }
704
705 if (mem->clientCount() == 0) {
706 debugs(90, 3, "storeUnregister: Consistency failure - store client being unregistered is not in the mem object's list for '" << e->getMD5Text() << "'");
707 return 0;
708 }
709
710 dlinkDelete(&sc->node, &mem->clients);
711 -- mem->nclients;
712
713 if (e->store_status == STORE_OK && !e->swappedOut())
714 e->swapOut();
715
716 if (sc->swapin_sio != NULL) {
717 storeClose(sc->swapin_sio, StoreIOState::readerDone);
718 sc->swapin_sio = NULL;
719 ++statCounter.swap.ins;
720 }
721
722 if (sc->_callback.pending()) {
723         /* callback with the error flag set to indicate unexpected termination */
724 debugs(90, 3, "store_client for " << *e << " has a callback");
725 sc->fail();
726 }
727
728 #if STORE_CLIENT_LIST_DEBUG
729 cbdataReferenceDone(sc->owner);
730
731 #endif
732
733 delete sc;
734
735 assert(e->locked());
736 // An entry locked by others may be unlocked (and destructed) by others, so
737 // we must lock again to safely dereference e after CheckQuickAbortIsReasonable().
738 e->lock("storeUnregister");
739
740 if (CheckQuickAbortIsReasonable(e))
741 e->abort();
742 else
743 mem->kickReads();
744
745 #if USE_ADAPTATION
746 e->kickProducer();
747 #endif
748
749 e->unlock("storeUnregister");
750 return 1;
751 }
752
753 /* Call handlers waiting for data to be appended to E. */
754 void
755 StoreEntry::invokeHandlers()
756 {
757 if (EBIT_TEST(flags, DELAY_SENDING)) {
758 debugs(90, 3, "DELAY_SENDING is on, exiting " << *this);
759 return;
760 }
761 if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT)) {
762 debugs(90, 3, "ENTRY_FWD_HDR_WAIT is on, exiting " << *this);
763 return;
764 }
765
766 /* Commit what we can to disk, if appropriate */
767 swapOut();
768 int i = 0;
769 store_client *sc;
770 dlink_node *nx = NULL;
771 dlink_node *node;
772
773 PROF_start(InvokeHandlers);
774
775 debugs(90, 3, "InvokeHandlers: " << getMD5Text() );
776 /* walk the entire list looking for valid callbacks */
777
778 for (node = mem_obj->clients.head; node; node = nx) {
779 sc = (store_client *)node->data;
780 nx = node->next;
781 debugs(90, 3, "StoreEntry::InvokeHandlers: checking client #" << i );
782 ++i;
783
784 if (!sc->_callback.pending())
785 continue;
786
787 if (sc->flags.disk_io_pending)
788 continue;
789
790 storeClientCopy2(this, sc);
791 }
792 PROF_stop(InvokeHandlers);
793 }
794
795 // Does not account for remote readers/clients.
796 int
797 storePendingNClients(const StoreEntry * e)
798 {
799 MemObject *mem = e->mem_obj;
800 int npend = NULL == mem ? 0 : mem->nclients;
801 debugs(90, 3, "storePendingNClients: returning " << npend);
802 return npend;
803 }
804
805 /* return true if the request should be aborted */
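/*
 * Illustration with the default quick_abort_min and quick_abort_max of 16 KB
 * (the figures below are made up): for a 1000 KB reply whose last client just
 * left, a download with 990 KB already fetched is completed because only
 * 10 KB remain (below quick_abort_min), while one with only 200 KB fetched is
 * aborted because the remaining 800 KB exceed quick_abort_max.
 */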
806 static bool
807 CheckQuickAbortIsReasonable(StoreEntry * entry)
808 {
809 assert(entry);
810 debugs(90, 3, "entry=" << *entry);
811
812 if (storePendingNClients(entry) > 0) {
813 debugs(90, 3, "quick-abort? NO storePendingNClients() > 0");
814 return false;
815 }
816
817 if (!shutting_down && Store::Root().transientReaders(*entry)) {
818 debugs(90, 3, "quick-abort? NO still have one or more transient readers");
819 return false;
820 }
821
822 if (entry->store_status != STORE_PENDING) {
823 debugs(90, 3, "quick-abort? NO store_status != STORE_PENDING");
824 return false;
825 }
826
827 if (EBIT_TEST(entry->flags, ENTRY_SPECIAL)) {
828 debugs(90, 3, "quick-abort? NO ENTRY_SPECIAL");
829 return false;
830 }
831
832 MemObject * const mem = entry->mem_obj;
833 assert(mem);
834 debugs(90, 3, "mem=" << mem);
835
836 if (mem->request && !mem->request->flags.cachable) {
837 debugs(90, 3, "quick-abort? YES !mem->request->flags.cachable");
838 return true;
839 }
840
841 if (EBIT_TEST(entry->flags, KEY_PRIVATE)) {
842 debugs(90, 3, "quick-abort? YES KEY_PRIVATE");
843 return true;
844 }
845
846 int64_t expectlen = entry->getReply()->content_length + entry->getReply()->hdr_sz;
847
848 if (expectlen < 0) {
849 /* expectlen is < 0 if *no* information about the object has been received */
850 debugs(90, 3, "quick-abort? YES no object data received yet");
851 return true;
852 }
853
854 int64_t curlen = mem->endOffset();
855
856 if (Config.quickAbort.min < 0) {
857 debugs(90, 3, "quick-abort? NO disabled");
858 return false;
859 }
860
861 if (mem->request && mem->request->range && mem->request->getRangeOffsetLimit() < 0) {
862         /* Don't abort if the admin has configured range_offset_limit -1 to download fully for caching. */
863 debugs(90, 3, "quick-abort? NO admin configured range replies to full-download");
864 return false;
865 }
866
867 if (curlen > expectlen) {
868 debugs(90, 3, "quick-abort? YES bad content length (" << curlen << " of " << expectlen << " bytes received)");
869 return true;
870 }
871
872 if ((expectlen - curlen) < (Config.quickAbort.min << 10)) {
873 debugs(90, 3, "quick-abort? NO only a little more object left to receive");
874 return false;
875 }
876
877 if ((expectlen - curlen) > (Config.quickAbort.max << 10)) {
878 debugs(90, 3, "quick-abort? YES too much left to go");
879 return true;
880 }
881
882 if (expectlen < 100) {
883 debugs(90, 3, "quick-abort? NO avoid FPE");
884 return false;
885 }
886
887 if ((curlen / (expectlen / 100)) > (Config.quickAbort.pct)) {
888 debugs(90, 3, "quick-abort? NO past point of no return");
889 return false;
890 }
891
892 debugs(90, 3, "quick-abort? YES default");
893 return true;
894 }
895
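/// appends a human-readable description of this client (callback data
/// pointer, copy offset and size, and flag states) to the given buffer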
896 void
897 store_client::dumpStats(MemBuf * output, int clientNumber) const
898 {
899 if (_callback.pending())
900 return;
901
902 output->appendf("\tClient #%d, %p\n", clientNumber, _callback.callback_data);
903 output->appendf("\t\tcopy_offset: %" PRId64 "\n", copyInto.offset);
904 output->appendf("\t\tcopy_size: %" PRIuSIZE "\n", copyInto.length);
905 output->append("\t\tflags:", 8);
906
907 if (flags.disk_io_pending)
908 output->append(" disk_io_pending", 16);
909
910 if (flags.store_copying)
911 output->append(" store_copying", 14);
912
913 if (flags.copy_event_pending)
914 output->append(" copy_event_pending", 19);
915
916 output->append("\n",1);
917 }
918
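/// whether the client is still waiting for an answer: both the callback
/// handler and its cbdata are set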
919 bool
920 store_client::Callback::pending() const
921 {
922 return callback_handler && callback_data;
923 }
924
925 store_client::Callback::Callback(STCB *function, void *data) : callback_handler(function), callback_data (data) {}
926
927 #if USE_DELAY_POOLS
928 void
929 store_client::setDelayId(DelayId delay_id)
930 {
931 delayId = delay_id;
932 }
933 #endif
934