/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 90    Storage Manager Client-Side Interface */

#include "squid.h"
#include "acl/FilledChecklist.h"
#include "base/CodeContext.h"
#include "event.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "MemBuf.h"
#include "MemObject.h"
#include "mime_header.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "StatCounters.h"
#include "Store.h"
#include "store_swapin.h"
#include "StoreClient.h"
#include "StoreMeta.h"
#include "StoreMetaUnpacker.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/*
 * NOTE: 'Header' refers to the swapfile metadata header.
 *       'OBJHeader' refers to the object header, with canonical
 *       processed object headers (which may derive from FTP/HTTP etc.
 *       upstream protocols).
 *       'Body' refers to the swapfile body, which is the full
 *       HTTP reply (including HTTP headers and body).
 */
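/*
 * A rough sketch of that layout and of the offset arithmetic used below
 * (based only on fileRead() and unpackHeader() in this file):
 *
 *   swap file:  [ swap meta TLVs, swap_hdr_sz bytes ][ 'Body' = full HTTP reply ]
 *
 *   disk offset of body byte N = swap_hdr_sz + N              (fileRead)
 *   object_sz                  = swap_file_sz - swap_hdr_sz   (unpackHeader)
 *
 * So a client asking for copyInto.offset = 0 receives the start of the
 * stored HTTP reply, not the swap metadata.
 */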
static StoreIOState::STRCB storeClientReadBody;
static StoreIOState::STRCB storeClientReadHeader;
static void storeClientCopy2(StoreEntry * e, store_client * sc);
static EVH storeClientCopyEvent;
static bool CheckQuickAbortIsReasonable(StoreEntry * entry);

CBDATA_CLASS_INIT(store_client);

/* StoreClient */

bool
StoreClient::onCollapsingPath() const
{
    if (!Config.onoff.collapsed_forwarding)
        return false;

    if (!Config.accessList.collapsedForwardingAccess)
        return true;

    ACLFilledChecklist checklist(Config.accessList.collapsedForwardingAccess, nullptr, nullptr);
    fillChecklist(checklist);
    return checklist.fastCheck().allowed();
}
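/*
 * For illustration only, a minimal squid.conf sketch that exercises both
 * checks above; the directive names are an assumption based on the Config
 * fields used here (collapsed_forwarding and collapsed_forwarding_access),
 * not a statement of the documented interface:
 *
 *   collapsed_forwarding on
 *   acl collapsible urlpath_regex -i \.iso$
 *   collapsed_forwarding_access allow collapsible
 *   collapsed_forwarding_access deny all
 *
 * With no collapsed_forwarding_access rules configured at all, every
 * request on the collapsing path is allowed, matching the early
 * "return true" above.
 */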

bool
StoreClient::startCollapsingOn(const StoreEntry &e, const bool doingRevalidation) const
{
    if (!e.hittingRequiresCollapsing())
        return false; // collapsing is impossible due to the entry state

    if (!onCollapsingPath())
        return false; // collapsing is impossible due to Squid configuration

    /* collapsing is possible; the caller must collapse */

    if (const auto tags = loggingTags()) {
        if (doingRevalidation)
            tags->collapsingHistory.revalidationCollapses++;
        else
            tags->collapsingHistory.otherCollapses++;
    }

    debugs(85, 5, e << " doingRevalidation=" << doingRevalidation);
    return true;
}

void
StoreClient::fillChecklist(ACLFilledChecklist &checklist) const
{
    // TODO: Consider moving all CF-related methods into a new dedicated class.
    Must(!"startCollapsingOn() caller must override fillChecklist()");
}

/* store_client */

bool
store_client::memReaderHasLowerOffset(int64_t anOffset) const
{
    return getType() == STORE_MEM_CLIENT && copyInto.offset < anOffset;
}

int
store_client::getType() const
{
    return type;
}

#if STORE_CLIENT_LIST_DEBUG
static store_client *
storeClientListSearch(const MemObject * mem, void *data)
{
    dlink_node *node;
    store_client *sc = NULL;

    for (node = mem->clients.head; node; node = node->next) {
        sc = node->data;

        if (sc->owner == data)
            return sc;
    }

    return NULL;
}

int
storeClientIsThisAClient(store_client * sc, void *someClient)
{
    return sc->owner == someClient;
}

#endif
#include "HttpRequest.h"

/* add client with fd to client list */
store_client *
storeClientListAdd(StoreEntry * e, void *data)
{
    MemObject *mem = e->mem_obj;
    store_client *sc;
    assert(mem);
#if STORE_CLIENT_LIST_DEBUG

    if (storeClientListSearch(mem, data) != NULL)
        /* XXX die! */
        assert(1 == 0);

#endif

    sc = new store_client (e);

    mem->addClient(sc);

    return sc;
}

void
store_client::callback(ssize_t sz, bool error)
{
    size_t bSz = 0;

    if (sz >= 0 && !error)
        bSz = sz;

    StoreIOBuffer result(bSz, 0,copyInto.data);

    if (sz < 0 || error)
        result.flags.error = 1;

    result.offset = cmp_offset;
    assert(_callback.pending());
    cmp_offset = copyInto.offset + bSz;
    STCB *temphandler = _callback.callback_handler;
    void *cbdata = _callback.callback_data;
    _callback = Callback(NULL, NULL);
    copyInto.data = NULL;

    if (cbdataReferenceValid(cbdata))
        temphandler(cbdata, result);

    cbdataReferenceDone(cbdata);
}

static void
storeClientCopyEvent(void *data)
{
    store_client *sc = (store_client *)data;
    debugs(90, 3, "storeClientCopyEvent: Running");
    assert (sc->flags.copy_event_pending);
    sc->flags.copy_event_pending = false;

    if (!sc->_callback.pending())
        return;

    storeClientCopy2(sc->entry, sc);
}

store_client::store_client(StoreEntry *e) :
    cmp_offset(0),
#if STORE_CLIENT_LIST_DEBUG
    owner(cbdataReference(data)),
#endif
    entry(e),
    type(e->storeClientType()),
    object_ok(true)
{
    flags.disk_io_pending = false;
    flags.store_copying = false;
    flags.copy_event_pending = false;
    ++ entry->refcount;

    if (getType() == STORE_DISK_CLIENT) {
        /* assert we'll be able to get the data we want */
        /* maybe we should open swapin_sio here */
        assert(entry->hasDisk() && !entry->swapoutFailed());
    }
}

store_client::~store_client()
{}

/* copy bytes requested by the client */
void
storeClientCopy(store_client * sc,
                StoreEntry * e,
                StoreIOBuffer copyInto,
                STCB * callback,
                void *data)
{
    assert (sc != NULL);
    sc->copy(e, copyInto,callback,data);
}

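/*
 * A minimal usage sketch for this API, assuming a cbdata-protected caller
 * and a caller-owned buffer; the names below (myHandler, callerCbdata,
 * callerBuffer) are invented for illustration:
 *
 *   static STCB myHandler;  // void myHandler(void *data, StoreIOBuffer result)
 *
 *   store_client *sc = storeClientListAdd(entry, callerCbdata);
 *   StoreIOBuffer readBuf(sizeof(callerBuffer), 0, callerBuffer);
 *   storeClientCopy(sc, entry, readBuf, myHandler, callerCbdata);
 *
 * myHandler() then checks result.flags.error and result.length and either
 * schedules the next storeClientCopy() at a higher offset or calls
 * storeUnregister(sc, entry, callerCbdata) when it is done.
 */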
void
store_client::copy(StoreEntry * anEntry,
                   StoreIOBuffer copyRequest,
                   STCB * callback_fn,
                   void *data)
{
    assert (anEntry == entry);
    assert (callback_fn);
    assert (data);
    assert(!EBIT_TEST(entry->flags, ENTRY_ABORTED));
    debugs(90, 3, "store_client::copy: " << entry->getMD5Text() << ", from " <<
           copyRequest.offset << ", for length " <<
           (int) copyRequest.length << ", cb " << callback_fn << ", cbdata " <<
           data);

#if STORE_CLIENT_LIST_DEBUG

    assert(this == storeClientListSearch(entry->mem_obj, data));
#endif

    assert(!_callback.pending());
#if ONLYCONTIGUOUSREQUESTS

    assert(cmp_offset == copyRequest.offset);
#endif
    /* range requests will skip into the body */
    cmp_offset = copyRequest.offset;
    _callback = Callback (callback_fn, cbdataReference(data));
    copyInto.data = copyRequest.data;
    copyInto.length = copyRequest.length;
    copyInto.offset = copyRequest.offset;

    static bool copying (false);
    assert (!copying);
    copying = true;
    PROF_start(storeClient_kickReads);
    /* We might be blocking comm reads due to readahead limits;
     * now that we have a new offset, trigger those reads...
     */
    entry->mem_obj->kickReads();
    PROF_stop(storeClient_kickReads);
    copying = false;

    anEntry->lock("store_client::copy"); // see deletion note below

    storeClientCopy2(entry, this);

    // Bug 3480: This store_client object may be deleted now if, for example,
    // the client rejects the hit response copied above. Use on-stack pointers!

#if USE_ADAPTATION
    anEntry->kickProducer();
#endif
    anEntry->unlock("store_client::copy");

    // Add no code here. This object may no longer exist.
}

/// Whether there is (or will be) more entry data for us.
bool
store_client::moreToSend() const
{
    if (entry->store_status == STORE_PENDING)
        return true; // there may be more coming

    /* STORE_OK, including aborted entries: no more data is coming */

    const int64_t len = entry->objectLen();

    // If we do not know the entry length, then we have to open the swap file.
    const bool canSwapIn = entry->hasDisk();
    if (len < 0)
        return canSwapIn;

    if (copyInto.offset >= len)
        return false; // sent everything there is

    if (canSwapIn)
        return true; // if we lack prefix, we can swap it in

    // If we cannot swap in, make sure we have what we want in RAM. Otherwise,
    // scheduleRead calls scheduleDiskRead which asserts without a swap file.
    const MemObject *mem = entry->mem_obj;
    return mem &&
           mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
}

static void
storeClientCopy2(StoreEntry * e, store_client * sc)
{
    /* reentrancy not allowed - note this could lead to
     * dropped events
     */

    if (sc->flags.copy_event_pending) {
        return;
    }

    if (sc->flags.store_copying) {
        sc->flags.copy_event_pending = true;
        debugs(90, 3, "storeClientCopy2: Queueing storeClientCopyEvent()");
        eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
        return;
    }

    debugs(90, 3, "storeClientCopy2: " << e->getMD5Text());
    assert(sc->_callback.pending());
    /*
     * We used to check for ENTRY_ABORTED here. But there were some
     * problems. For example, we might have a slow client (or two) and
     * the peer server is reading far ahead and swapping to disk. Even
     * if the peer aborts, we want to give the client(s)
     * everything we got before the abort condition occurred.
     */
    /* Warning: doCopy may indirectly free itself in callbacks,
     * hence the lock to keep it active for the duration of
     * this function
     * XXX: Locking does not prevent calling sc destructor (it only prevents
     * freeing sc memory) so sc may become invalid from C++ p.o.v.
     */
    CbcPointer<store_client> tmpLock = sc;
    assert (!sc->flags.store_copying);
    sc->doCopy(e);
    assert(!sc->flags.store_copying);
}

void
store_client::doCopy(StoreEntry *anEntry)
{
    assert (anEntry == entry);
    flags.store_copying = true;
    MemObject *mem = entry->mem_obj;

    debugs(33, 5, "store_client::doCopy: co: " <<
           copyInto.offset << ", hi: " <<
           mem->endOffset());

    if (!moreToSend()) {
        /* There is no more to send! */
        debugs(33, 3, HERE << "There is no more to send!");
        callback(0);
        flags.store_copying = false;
        return;
    }

    /* Check that we actually have data */
    if (anEntry->store_status == STORE_PENDING && copyInto.offset >= mem->endOffset()) {
        debugs(90, 3, "store_client::doCopy: Waiting for more");
        flags.store_copying = false;
        return;
    }

    /*
     * Slight weirdness here. We open a swapin file for any
     * STORE_DISK_CLIENT, even if we can copy the requested chunk
     * from memory in the next block. We must try to open the
     * swapin file before sending any data to the client side. If
     * we postpone the open, and then can not open the file later
     * on, the client loses big time. Its transfer just gets cut
     * off. Better to open it early (while the client side handler
     * is clientCacheHit) so that we can fall back to a cache miss
     * if needed.
     */

    if (STORE_DISK_CLIENT == getType() && swapin_sio == NULL) {
        if (!startSwapin())
            return; // failure
    }
    scheduleRead();
}

/// opens the swapin "file" if possible; otherwise, fail()s and returns false
bool
store_client::startSwapin()
{
    debugs(90, 3, "store_client::doCopy: Need to open swap in file");
    /* gotta open the swapin file */

    if (storeTooManyDiskFilesOpen()) {
        /* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
        fail();
        flags.store_copying = false;
        return false;
    } else if (!flags.disk_io_pending) {
        /* Don't set store_io_pending here */
        storeSwapInStart(this);

        if (swapin_sio == NULL) {
            fail();
            flags.store_copying = false;
            return false;
        }

        return true;
    } else {
        debugs(90, DBG_IMPORTANT, "WARNING: Averted multiple fd operation (1)");
        flags.store_copying = false;
        return false;
    }
}

void
store_client::scheduleRead()
{
    MemObject *mem = entry->mem_obj;

    if (copyInto.offset >= mem->inmem_lo && copyInto.offset < mem->endOffset())
        scheduleMemRead();
    else
        scheduleDiskRead();
}

void
store_client::scheduleDiskRead()
{
    /* What the client wants is not in memory. Schedule a disk read */
    if (getType() == STORE_DISK_CLIENT) {
        // we should have called startSwapin() already
        assert(swapin_sio != NULL);
    } else if (!swapin_sio && !startSwapin()) {
        debugs(90, 3, "bailing after swapin start failure for " << *entry);
        assert(!flags.store_copying);
        return;
    }

    assert(!flags.disk_io_pending);

    debugs(90, 3, "reading " << *entry << " from disk");

    fileRead();

    flags.store_copying = false;
}

void
store_client::scheduleMemRead()
{
    /* What the client wants is in memory */
    /* Old style */
    debugs(90, 3, "store_client::doCopy: Copying normal from memory");
    size_t sz = entry->mem_obj->data_hdr.copy(copyInto);
    callback(sz);
    flags.store_copying = false;
}

void
store_client::fileRead()
{
    MemObject *mem = entry->mem_obj;

    assert(_callback.pending());
    assert(!flags.disk_io_pending);
    flags.disk_io_pending = true;

    if (mem->swap_hdr_sz != 0)
        if (entry->swappingOut())
            assert(mem->swapout.sio->offset() > copyInto.offset + (int64_t)mem->swap_hdr_sz);

    storeRead(swapin_sio,
              copyInto.data,
              copyInto.length,
              copyInto.offset + mem->swap_hdr_sz,
              mem->swap_hdr_sz == 0 ? storeClientReadHeader
              : storeClientReadBody,
              this);
}

void
store_client::readBody(const char *, ssize_t len)
{
    int parsed_header = 0;

    // Don't assert disk_io_pending here.. may be called by read_header
    flags.disk_io_pending = false;
    assert(_callback.pending());
    debugs(90, 3, "storeClientReadBody: len " << len << "");

    if (len < 0)
        return fail();

    const auto rep = entry->mem_obj ? &entry->mem().baseReply() : nullptr;
    if (copyInto.offset == 0 && len > 0 && rep && rep->sline.status() == Http::scNone) {
        /* Our structure ! */
        if (!entry->mem_obj->adjustableBaseReply().parseCharBuf(copyInto.data, headersEnd(copyInto.data, len))) {
            debugs(90, DBG_CRITICAL, "Could not parse headers from on disk object");
        } else {
            parsed_header = 1;
        }
    }

    if (len > 0 && rep && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
        storeGetMemSpace(len);
        // The above may start to free our object so we need to check again
        if (entry->mem_obj->inmem_lo == 0) {
            /* Copy read data back into memory.
             * copyInto.offset includes headers, which is what mem cache needs
             */
            int64_t mem_offset = entry->mem_obj->endOffset();
            if ((copyInto.offset == mem_offset) || (parsed_header && mem_offset == rep->hdr_sz)) {
                entry->mem_obj->write(StoreIOBuffer(len, copyInto.offset, copyInto.data));
            }
        }
    }

    callback(len);
}

void
store_client::fail()
{
    object_ok = false;
    /* Synchronous open failures call back from the store
     * before startSwapin detects the failure.
     * TODO: fix this inconsistent behaviour - probably by
     * making storeSwapInStart a callback function rather
     * than synchronous.
     */

    if (_callback.pending())
        callback(0, true);
}

static void
storeClientReadHeader(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
{
    store_client *sc = (store_client *)data;
    sc->readHeader(buf, len);
}

static void
storeClientReadBody(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
{
    store_client *sc = (store_client *)data;
    sc->readBody(buf, len);
}

bool
store_client::unpackHeader(char const *buf, ssize_t len)
{
    debugs(90, 3, "store_client::unpackHeader: len " << len << "");
    assert(len >= 0);

    int swap_hdr_sz = 0;
    tlv *tlv_list = nullptr;
    try {
        StoreMetaUnpacker aBuilder(buf, len, &swap_hdr_sz);
        tlv_list = aBuilder.createStoreMeta();
    } catch (const std::exception &e) {
        debugs(90, DBG_IMPORTANT, "WARNING: failed to unpack metadata because " << e.what());
        return false;
    }
    assert(tlv_list);

    /*
     * Check the meta data and make sure we got the right object.
     */
    for (tlv *t = tlv_list; t; t = t->next) {
        if (!t->checkConsistency(entry)) {
            storeSwapTLVFree(tlv_list);
            return false;
        }
    }

    storeSwapTLVFree(tlv_list);

    assert(swap_hdr_sz >= 0);
    entry->mem_obj->swap_hdr_sz = swap_hdr_sz;
    if (entry->swap_file_sz > 0) { // collapsed hits may not know swap_file_sz
        assert(entry->swap_file_sz >= static_cast<uint64_t>(swap_hdr_sz));
        entry->mem_obj->object_sz = entry->swap_file_sz - swap_hdr_sz;
    }
    debugs(90, 5, "store_client::unpackHeader: swap_file_sz=" <<
           entry->swap_file_sz << "( " << swap_hdr_sz << " + " <<
           entry->mem_obj->object_sz << ")");
    return true;
}

void
store_client::readHeader(char const *buf, ssize_t len)
{
    MemObject *const mem = entry->mem_obj;

    assert(flags.disk_io_pending);
    flags.disk_io_pending = false;
    assert(_callback.pending());

    // abort if we fail()'d earlier
    if (!object_ok)
        return;

    if (len < 0)
        return fail();

    if (!unpackHeader(buf, len)) {
        fail();
        return;
    }

    /*
     * If our last read got some data the client wants, then give
     * it to them, otherwise schedule another read.
     */
    size_t body_sz = len - mem->swap_hdr_sz;

    if (copyInto.offset < static_cast<int64_t>(body_sz)) {
        /*
         * we have (part of) what they want
         */
        size_t copy_sz = min(copyInto.length, body_sz);
        debugs(90, 3, "storeClientReadHeader: copying " << copy_sz << " bytes of body");
        memmove(copyInto.data, copyInto.data + mem->swap_hdr_sz, copy_sz);

        readBody(copyInto.data, copy_sz);

        return;
    }

    /*
     * we don't have what the client wants, but at least we now
     * know the swap header size.
     */
    fileRead();
}

int
storeClientCopyPending(store_client * sc, StoreEntry * e, void *data)
{
#if STORE_CLIENT_LIST_DEBUG
    assert(sc == storeClientListSearch(e->mem_obj, data));
#endif
#ifndef SILLY_CODE

    assert(sc);
#endif

    assert(sc->entry == e);
#if SILLY_CODE

    if (sc == NULL)
        return 0;

#endif

    if (!sc->_callback.pending())
        return 0;

    return 1;
}

/*
 * This routine hasn't been optimised to take advantage of the
 * passed sc. Yet.
 */
int
storeUnregister(store_client * sc, StoreEntry * e, void *data)
{
    MemObject *mem = e->mem_obj;
#if STORE_CLIENT_LIST_DEBUG

    assert(sc == storeClientListSearch(e->mem_obj, data));
#endif

    if (mem == NULL)
        return 0;

    debugs(90, 3, "storeUnregister: called for '" << e->getMD5Text() << "'");

    if (sc == NULL) {
        debugs(90, 3, "storeUnregister: No matching client for '" << e->getMD5Text() << "'");
        return 0;
    }

    if (mem->clientCount() == 0) {
        debugs(90, 3, "storeUnregister: Consistency failure - store client being unregistered is not in the mem object's list for '" << e->getMD5Text() << "'");
        return 0;
    }

    dlinkDelete(&sc->node, &mem->clients);
    -- mem->nclients;

    const auto swapoutFinished = e->swappedOut() || e->swapoutFailed();
    if (e->store_status == STORE_OK && !swapoutFinished)
        e->swapOut();

    if (sc->swapin_sio != NULL) {
        storeClose(sc->swapin_sio, StoreIOState::readerDone);
        sc->swapin_sio = NULL;
        ++statCounter.swap.ins;
    }

    if (sc->_callback.pending()) {
        /* callback with ssize = -1 to indicate unexpected termination */
        debugs(90, 3, "store_client for " << *e << " has a callback");
        sc->fail();
    }

#if STORE_CLIENT_LIST_DEBUG
    cbdataReferenceDone(sc->owner);

#endif

    delete sc;

    assert(e->locked());
    // An entry locked by others may be unlocked (and destructed) by others, so
    // we must lock again to safely dereference e after CheckQuickAbortIsReasonable().
    e->lock("storeUnregister");

    if (CheckQuickAbortIsReasonable(e))
        e->abort();
    else
        mem->kickReads();

#if USE_ADAPTATION
    e->kickProducer();
#endif

    e->unlock("storeUnregister");
    return 1;
}

/* Call handlers waiting for data to be appended to E. */
void
StoreEntry::invokeHandlers()
{
    if (EBIT_TEST(flags, DELAY_SENDING)) {
        debugs(90, 3, "DELAY_SENDING is on, exiting " << *this);
        return;
    }
    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(90, 3, "ENTRY_FWD_HDR_WAIT is on, exiting " << *this);
        return;
    }

    /* Commit what we can to disk, if appropriate */
    swapOut();
    int i = 0;
    store_client *sc;
    dlink_node *nx = NULL;
    dlink_node *node;

    PROF_start(InvokeHandlers);

    debugs(90, 3, mem_obj->nclients << " clients; " << *this << ' ' << getMD5Text());
    /* walk the entire list looking for valid callbacks */

    const auto savedContext = CodeContext::Current();
    for (node = mem_obj->clients.head; node; node = nx) {
        sc = (store_client *)node->data;
        nx = node->next;
        ++i;

        if (!sc->_callback.pending())
            continue;

        if (sc->flags.disk_io_pending)
            continue;

        CodeContext::Reset(sc->_callback.codeContext);
        debugs(90, 3, "checking client #" << i);
        storeClientCopy2(this, sc);
    }
    CodeContext::Reset(savedContext);
    PROF_stop(InvokeHandlers);
}

// Does not account for remote readers/clients.
int
storePendingNClients(const StoreEntry * e)
{
    MemObject *mem = e->mem_obj;
    int npend = NULL == mem ? 0 : mem->nclients;
    debugs(90, 3, "storePendingNClients: returning " << npend);
    return npend;
}

/* return true if the request should be aborted */
static bool
CheckQuickAbortIsReasonable(StoreEntry * entry)
{
    assert(entry);
    debugs(90, 3, "entry=" << *entry);

    if (storePendingNClients(entry) > 0) {
        debugs(90, 3, "quick-abort? NO storePendingNClients() > 0");
        return false;
    }

    if (!shutting_down && Store::Root().transientReaders(*entry)) {
        debugs(90, 3, "quick-abort? NO still have one or more transient readers");
        return false;
    }

    if (entry->store_status != STORE_PENDING) {
        debugs(90, 3, "quick-abort? NO store_status != STORE_PENDING");
        return false;
    }

    if (EBIT_TEST(entry->flags, ENTRY_SPECIAL)) {
        debugs(90, 3, "quick-abort? NO ENTRY_SPECIAL");
        return false;
    }

    MemObject * const mem = entry->mem_obj;
    assert(mem);
    debugs(90, 3, "mem=" << mem);

    if (mem->request && !mem->request->flags.cachable) {
        debugs(90, 3, "quick-abort? YES !mem->request->flags.cachable");
        return true;
    }

    if (EBIT_TEST(entry->flags, KEY_PRIVATE)) {
        debugs(90, 3, "quick-abort? YES KEY_PRIVATE");
        return true;
    }

    const auto &reply = mem->baseReply();

    if (reply.hdr_sz <= 0) {
        // TODO: Check whether this condition works for HTTP/0 responses.
        debugs(90, 3, "quick-abort? YES no object data received yet");
        return true;
    }

    if (Config.quickAbort.min < 0) {
        debugs(90, 3, "quick-abort? NO disabled");
        return false;
    }

    if (mem->request && mem->request->range && mem->request->getRangeOffsetLimit() < 0) {
        // the admin has configured "range_offset_limit none"
        debugs(90, 3, "quick-abort? NO admin configured range replies to full-download");
        return false;
    }

    if (reply.content_length < 0) {
        // XXX: cf.data.pre does not document what should happen in this case
        // We know that quick_abort is enabled, but no limit can be applied.
        debugs(90, 3, "quick-abort? YES unknown content length");
        return true;
    }
    const auto expectlen = reply.hdr_sz + reply.content_length;

    int64_t curlen = mem->endOffset();

    if (curlen > expectlen) {
        debugs(90, 3, "quick-abort? YES bad content length (" << curlen << " of " << expectlen << " bytes received)");
        return true;
    }

    if ((expectlen - curlen) < (Config.quickAbort.min << 10)) {
        debugs(90, 3, "quick-abort? NO only a little more object left to receive");
        return false;
    }

    if ((expectlen - curlen) > (Config.quickAbort.max << 10)) {
        debugs(90, 3, "quick-abort? YES too much left to go");
        return true;
    }

    // XXX: This is absurd! TODO: For positives, "a/(b/c) > d" is "a*c > b*d".
    if (expectlen < 100) {
        debugs(90, 3, "quick-abort? NO avoid FPE");
        return false;
    }

    if ((curlen / (expectlen / 100)) > (Config.quickAbort.pct)) {
        debugs(90, 3, "quick-abort? NO past point of no return");
        return false;
    }
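    /*
     * A worked example of the rewrite suggested in the TODO above, with
     * invented numbers: for expectlen = 1000, curlen = 950 and
     * quick_abort_pct 95, the current test computes 950 / (1000 / 100) = 95,
     * which is not > 95, so we fall through and abort. The division-free
     * form "curlen * 100 > expectlen * pct" yields 95000 > 95000, i.e. the
     * same decision here, and it also works when expectlen < 100, which
     * would make the FPE guard above unnecessary.
     */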

    debugs(90, 3, "quick-abort? YES default");
    return true;
}

void
store_client::dumpStats(MemBuf * output, int clientNumber) const
{
    if (_callback.pending())
        return;

    output->appendf("\tClient #%d, %p\n", clientNumber, _callback.callback_data);
    output->appendf("\t\tcopy_offset: %" PRId64 "\n", copyInto.offset);
    output->appendf("\t\tcopy_size: %" PRIuSIZE "\n", copyInto.length);
    output->append("\t\tflags:", 8);

    if (flags.disk_io_pending)
        output->append(" disk_io_pending", 16);

    if (flags.store_copying)
        output->append(" store_copying", 14);

    if (flags.copy_event_pending)
        output->append(" copy_event_pending", 19);

    output->append("\n",1);
}

bool
store_client::Callback::pending() const
{
    return callback_handler && callback_data;
}

store_client::Callback::Callback(STCB *function, void *data):
    callback_handler(function),
    callback_data(data),
    codeContext(CodeContext::Current())
{
}

#if USE_DELAY_POOLS
void
store_client::setDelayId(DelayId delay_id)
{
    delayId = delay_id;
}
#endif