src/store_client.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 90 Storage Manager Client-Side Interface */
10
11 #include "squid.h"
12 #include "event.h"
13 #include "globals.h"
14 #include "HttpReply.h"
15 #include "HttpRequest.h"
16 #include "MemBuf.h"
17 #include "MemObject.h"
18 #include "mime_header.h"
19 #include "profiler/Profiler.h"
20 #include "SquidConfig.h"
21 #include "StatCounters.h"
22 #include "Store.h"
23 #include "store_swapin.h"
24 #include "StoreClient.h"
25 #include "StoreMeta.h"
26 #include "StoreMetaUnpacker.h"
27 #if USE_DELAY_POOLS
28 #include "DelayPools.h"
29 #endif
30
31 /*
32 * NOTE: 'Header' refers to the swapfile metadata header.
33 * 'OBJHeader' refers to the object header, with canonical
34 * processed object headers (which may derive from FTP/HTTP etc.
35 * upstream protocols).
36 * 'Body' refers to the swapfile body, which is the full
37 * HTTP reply (including HTTP headers and body).
38 */
39 static StoreIOState::STRCB storeClientReadBody;
40 static StoreIOState::STRCB storeClientReadHeader;
41 static void storeClientCopy2(StoreEntry * e, store_client * sc);
42 static EVH storeClientCopyEvent;
43 static bool CheckQuickAbortIsReasonable(StoreEntry * entry);
44 static void CheckQuickAbort(StoreEntry * entry);
45
46 CBDATA_CLASS_INIT(store_client);
47
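/// Whether this is a memory-cache client whose current copy request starts
/// below the given offset.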
48 bool
49 store_client::memReaderHasLowerOffset(int64_t anOffset) const
50 {
51 return getType() == STORE_MEM_CLIENT && copyInto.offset < anOffset;
52 }
53
54 int
55 store_client::getType() const
56 {
57 return type;
58 }
59
60 #if STORE_CLIENT_LIST_DEBUG
61 static store_client *
62 storeClientListSearch(const MemObject * mem, void *data)
63 {
64 dlink_node *node;
65 store_client *sc = NULL;
66
67 for (node = mem->clients.head; node; node = node->next) {
68 sc = node->data;
69
70 if (sc->owner == data)
71 return sc;
72 }
73
74 return NULL;
75 }
76
77 int
78 storeClientIsThisAClient(store_client * sc, void *someClient)
79 {
80 return sc->owner == someClient;
81 }
82
83 #endif
85
86 /* add client with fd to client list */
87 store_client *
88 storeClientListAdd(StoreEntry * e, void *data)
89 {
90 MemObject *mem = e->mem_obj;
91 store_client *sc;
92 assert(mem);
93 #if STORE_CLIENT_LIST_DEBUG
94
95 if (storeClientListSearch(mem, data) != NULL)
96 /* XXX die! */
97 assert(1 == 0);
98
99 #endif
100
101 sc = new store_client (e);
102
103 mem->addClient(sc);
104
105 return sc;
106 }
107
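/// Delivers the outcome of the pending copy request: wraps the bytes (if
/// any) placed in copyInto.data in a StoreIOBuffer, flags errors, clears
/// the saved callback, and invokes the client's STCB handler while its
/// cbdata is still valid.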
108 void
109 store_client::callback(ssize_t sz, bool error)
110 {
111 size_t bSz = 0;
112
113 if (sz >= 0 && !error)
114 bSz = sz;
115
116 StoreIOBuffer result(bSz, 0, copyInto.data);
117
118 if (sz < 0 || error)
119 result.flags.error = 1;
120
121 result.offset = cmp_offset;
122 assert(_callback.pending());
123 cmp_offset = copyInto.offset + bSz;
124 STCB *temphandler = _callback.callback_handler;
125 void *cbdata = _callback.callback_data;
126 _callback = Callback(NULL, NULL);
127 copyInto.data = NULL;
128
129 if (cbdataReferenceValid(cbdata))
130 temphandler(cbdata, result);
131
132 cbdataReferenceDone(cbdata);
133 }
134
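/// Deferred-copy event handler: storeClientCopy2() schedules this event
/// when a copy is requested while another copy is already in progress.
/// It retries the copy, provided the client callback is still pending.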
135 static void
136 storeClientCopyEvent(void *data)
137 {
138 store_client *sc = (store_client *)data;
139 debugs(90, 3, "storeClientCopyEvent: Running");
140 assert (sc->flags.copy_event_pending);
141 sc->flags.copy_event_pending = false;
142
143 if (!sc->_callback.pending())
144 return;
145
146 storeClientCopy2(sc->entry, sc);
147 }
148
149 store_client::store_client(StoreEntry *e) : entry (e)
150 #if USE_DELAY_POOLS
151 , delayId()
152 #endif
153 , type (e->storeClientType())
154 , object_ok(true)
155 {
156 cmp_offset = 0;
157 flags.disk_io_pending = false;
158 ++ entry->refcount;
159
160 if (getType() == STORE_DISK_CLIENT)
161 /* assert we'll be able to get the data we want */
162 /* maybe we should open swapin_sio here */
163 assert(entry->swap_filen > -1 || entry->swappingOut());
164
165 #if STORE_CLIENT_LIST_DEBUG
166
167 owner = cbdataReference(data);
168
169 #endif
170 }
171
172 store_client::~store_client()
173 {}
174
175 /* copy bytes requested by the client */
176 void
177 storeClientCopy(store_client * sc,
178 StoreEntry * e,
179 StoreIOBuffer copyInto,
180 STCB * callback,
181 void *data)
182 {
183 assert (sc != NULL);
184 sc->copy(e, copyInto,callback,data);
185 }
186
187 void
188 store_client::copy(StoreEntry * anEntry,
189 StoreIOBuffer copyRequest,
190 STCB * callback_fn,
191 void *data)
192 {
193 assert (anEntry == entry);
194 assert (callback_fn);
195 assert (data);
196 assert(!EBIT_TEST(entry->flags, ENTRY_ABORTED));
197 debugs(90, 3, "store_client::copy: " << entry->getMD5Text() << ", from " <<
198 copyRequest.offset << ", for length " <<
199 (int) copyRequest.length << ", cb " << callback_fn << ", cbdata " <<
200 data);
201
202 #if STORE_CLIENT_LIST_DEBUG
203
204 assert(this == storeClientListSearch(entry->mem_obj, data));
205 #endif
206
207 assert(!_callback.pending());
208 #if ONLYCONTIGUOUSREQUESTS
209
210 assert(cmp_offset == copyRequest.offset);
211 #endif
212 /* range requests will skip into the body */
213 cmp_offset = copyRequest.offset;
214 _callback = Callback (callback_fn, cbdataReference(data));
215 copyInto.data = copyRequest.data;
216 copyInto.length = copyRequest.length;
217 copyInto.offset = copyRequest.offset;
218
219 static bool copying (false);
220 assert (!copying);
221 copying = true;
222 PROF_start(storeClient_kickReads);
223 /* We might be blocking comm reads due to readahead limits.
224 * Now that we have a new offset, trigger those reads...
225 */
226 entry->mem_obj->kickReads();
227 PROF_stop(storeClient_kickReads);
228 copying = false;
229
230 anEntry->lock("store_client::copy"); // see deletion note below
231
232 storeClientCopy2(entry, this);
233
234 // Bug 3480: This store_client object may be deleted now if, for example,
235 // the client rejects the hit response copied above. Use on-stack pointers!
236
237 #if USE_ADAPTATION
238 anEntry->kickProducer();
239 #endif
240 anEntry->unlock("store_client::copy");
241
242 // Add no code here. This object may no longer exist.
243 }
244
245 /// Whether there is (or will be) more entry data for us.
246 bool
247 store_client::moreToSend() const
248 {
249 if (entry->store_status == STORE_PENDING)
250 return true; // there may be more coming
251
252 /* STORE_OK, including aborted entries: no more data is coming */
253
254 const int64_t len = entry->objectLen();
255
256 // If we do not know the entry length, then we have to open the swap file.
257 const bool canSwapIn = entry->swap_filen >= 0;
258 if (len < 0)
259 return canSwapIn;
260
261 if (copyInto.offset >= len)
262 return false; // sent everything there is
263
264 if (canSwapIn)
265 return true; // if we lack prefix, we can swap it in
266
267 // If we cannot swap in, make sure we have what we want in RAM. Otherwise,
268 // scheduleRead calls scheduleDiskRead which asserts without a swap file.
269 const MemObject *mem = entry->mem_obj;
270 return mem &&
271 mem->inmem_lo <= copyInto.offset && copyInto.offset < mem->endOffset();
272 }
273
274 static void
275 storeClientCopy2(StoreEntry * e, store_client * sc)
276 {
277 /* reentrancy not allowed - note this could lead to
278 * dropped events
279 */
280
281 if (sc->flags.copy_event_pending) {
282 return;
283 }
284
285 if (EBIT_TEST(e->flags, ENTRY_FWD_HDR_WAIT)) {
286 debugs(90, 5, "storeClientCopy2: returning because ENTRY_FWD_HDR_WAIT set");
287 return;
288 }
289
290 if (sc->flags.store_copying) {
291 sc->flags.copy_event_pending = true;
292 debugs(90, 3, "storeClientCopy2: Queueing storeClientCopyEvent()");
293 eventAdd("storeClientCopyEvent", storeClientCopyEvent, sc, 0.0, 0);
294 return;
295 }
296
297 debugs(90, 3, "storeClientCopy2: " << e->getMD5Text());
298 assert(sc->_callback.pending());
299 /*
300 * We used to check for ENTRY_ABORTED here. But there were some
301 * problems. For example, we might have a slow client (or two) and
302 * the peer server is reading far ahead and swapping to disk. Even
303 * if the peer aborts, we want to give the client(s)
304 * everything we got before the abort condition occurred.
305 */
306 /* Warning: doCopy may indirectly free itself in callbacks,
307 * hence the lock to keep it active for the duration of
308 * this function
309 * XXX: Locking does not prevent calling sc destructor (it only prevents
310 * freeing sc memory) so sc may become invalid from C++ p.o.v.
311 */
312 CbcPointer<store_client> tmpLock = sc;
313 assert (!sc->flags.store_copying);
314 sc->doCopy(e);
315 assert(!sc->flags.store_copying);
316 }
317
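/// Tries to satisfy the pending copy request right now: answers with EOF
/// if no more data is coming, waits if the wanted bytes have not arrived
/// yet, and otherwise opens the swapin file (for disk clients) before
/// scheduling a memory or disk read.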
318 void
319 store_client::doCopy(StoreEntry *anEntry)
320 {
321 assert (anEntry == entry);
322 flags.store_copying = true;
323 MemObject *mem = entry->mem_obj;
324
325 debugs(33, 5, "store_client::doCopy: co: " <<
326 copyInto.offset << ", hi: " <<
327 mem->endOffset());
328
329 if (!moreToSend()) {
330 /* There is no more to send! */
331 debugs(33, 3, HERE << "There is no more to send!");
332 callback(0);
333 flags.store_copying = false;
334 return;
335 }
336
337 /* Check that we actually have data */
338 if (anEntry->store_status == STORE_PENDING && copyInto.offset >= mem->endOffset()) {
339 debugs(90, 3, "store_client::doCopy: Waiting for more");
340 flags.store_copying = false;
341 return;
342 }
343
344 /*
345 * Slight weirdness here. We open a swapin file for any
346 * STORE_DISK_CLIENT, even if we can copy the requested chunk
347 * from memory in the next block. We must try to open the
348 * swapin file before sending any data to the client side. If
349 * we postpone the open, and then can not open the file later
350 * on, the client loses big time. Its transfer just gets cut
351 * off. Better to open it early (while the client side handler
352 * is clientCacheHit) so that we can fall back to a cache miss
353 * if needed.
354 */
355
356 if (STORE_DISK_CLIENT == getType() && swapin_sio == NULL) {
357 if (!startSwapin())
358 return; // failure
359 }
360 scheduleRead();
361 }
362
363 /// opens the swapin "file" if possible; otherwise, fail()s and returns false
364 bool
365 store_client::startSwapin()
366 {
367 debugs(90, 3, "store_client::doCopy: Need to open swap in file");
368 /* gotta open the swapin file */
369
370 if (storeTooManyDiskFilesOpen()) {
371 /* yuck -- this causes a TCP_SWAPFAIL_MISS on the client side */
372 fail();
373 flags.store_copying = false;
374 return false;
375 } else if (!flags.disk_io_pending) {
376 /* Don't set store_io_pending here */
377 storeSwapInStart(this);
378
379 if (swapin_sio == NULL) {
380 fail();
381 flags.store_copying = false;
382 return false;
383 }
384
385 return true;
386 } else {
387 debugs(90, DBG_IMPORTANT, "WARNING: Averted multiple fd operation (1)");
388 flags.store_copying = false;
389 return false;
390 }
391 }
392
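/// Dispatches the pending copy: bytes already held by the in-memory
/// object are served by scheduleMemRead(); everything else goes through
/// scheduleDiskRead().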
393 void
394 store_client::scheduleRead()
395 {
396 MemObject *mem = entry->mem_obj;
397
398 if (copyInto.offset >= mem->inmem_lo && copyInto.offset < mem->endOffset())
399 scheduleMemRead();
400 else
401 scheduleDiskRead();
402 }
403
404 void
405 store_client::scheduleDiskRead()
406 {
407 /* What the client wants is not in memory. Schedule a disk read */
408 if (getType() == STORE_DISK_CLIENT) {
409 // we should have called startSwapin() already
410 assert(swapin_sio != NULL);
411 } else if (!swapin_sio && !startSwapin()) {
412 debugs(90, 3, "bailing after swapin start failure for " << *entry);
413 assert(!flags.store_copying);
414 return;
415 }
416
417 assert(!flags.disk_io_pending);
418
419 debugs(90, 3, "reading " << *entry << " from disk");
420
421 fileRead();
422
423 flags.store_copying = false;
424 }
425
426 void
427 store_client::scheduleMemRead()
428 {
429 /* What the client wants is in memory */
430 /* Old style */
431 debugs(90, 3, "store_client::doCopy: Copying normal from memory");
432 size_t sz = entry->mem_obj->data_hdr.copy(copyInto);
433 callback(sz);
434 flags.store_copying = false;
435 }
436
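/// Issues the next storeRead() against the swapin file. The very first
/// read (while swap_hdr_sz is still zero) fetches the swap metadata
/// header and is answered by storeClientReadHeader(); later reads fetch
/// body bytes and are answered by storeClientReadBody().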
437 void
438 store_client::fileRead()
439 {
440 MemObject *mem = entry->mem_obj;
441
442 assert(_callback.pending());
443 assert(!flags.disk_io_pending);
444 flags.disk_io_pending = true;
445
446 if (mem->swap_hdr_sz != 0)
447 if (entry->swap_status == SWAPOUT_WRITING)
448 assert(mem->swapout.sio->offset() > copyInto.offset + (int64_t)mem->swap_hdr_sz);
449
450 storeRead(swapin_sio,
451 copyInto.data,
452 copyInto.length,
453 copyInto.offset + mem->swap_hdr_sz,
454 mem->swap_hdr_sz == 0 ? storeClientReadHeader
455 : storeClientReadBody,
456 this);
457 }
458
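/// Handles a completed body read from the swapin file: parses the on-disk
/// HTTP reply headers when reading from offset 0, feeds the data back into
/// the memory cache when that is allowed, and finally hands the bytes to
/// the waiting client callback.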
459 void
460 store_client::readBody(const char *, ssize_t len)
461 {
462 int parsed_header = 0;
463
464 // Don't assert disk_io_pending here; this is also called by readHeader()
465 flags.disk_io_pending = false;
466 assert(_callback.pending());
467 debugs(90, 3, "storeClientReadBody: len " << len << "");
468
469 if (copyInto.offset == 0 && len > 0 && entry->getReply()->sline.status() == Http::scNone) {
470 /* Our structure ! */
471 HttpReply *rep = (HttpReply *) entry->getReply(); // bypass const
472
473 if (!rep->parseCharBuf(copyInto.data, headersEnd(copyInto.data, len))) {
474 debugs(90, DBG_CRITICAL, "Could not parse headers from on disk object");
475 } else {
476 parsed_header = 1;
477 }
478 }
479
480 const HttpReply *rep = entry->getReply();
481 if (len > 0 && rep && entry->mem_obj->inmem_lo == 0 && entry->objectLen() <= (int64_t)Config.Store.maxInMemObjSize && Config.onoff.memory_cache_disk) {
482 storeGetMemSpace(len);
483 // The above may start to free our object so we need to check again
484 if (entry->mem_obj->inmem_lo == 0) {
485 /* Copy read data back into memory.
486 * copyInto.offset includes headers, which is what mem cache needs
487 */
488 int64_t mem_offset = entry->mem_obj->endOffset();
489 if ((copyInto.offset == mem_offset) || (parsed_header && mem_offset == rep->hdr_sz)) {
490 entry->mem_obj->write(StoreIOBuffer(len, copyInto.offset, copyInto.data));
491 }
492 }
493 }
494
495 callback(len);
496 }
497
498 void
499 store_client::fail()
500 {
501 object_ok = false;
502 /* Synchronous open failures call back from the store
503 * before startSwapin detects the failure.
504 * TODO: fix this inconsistent behaviour - probably by
505 * having storeSwapInStart become a callback function,
506 * not synchronous.
507 */
508
509 if (_callback.pending())
510 callback(0, true);
511 }
512
513 static void
514 storeClientReadHeader(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
515 {
516 store_client *sc = (store_client *)data;
517 sc->readHeader(buf, len);
518 }
519
520 static void
521 storeClientReadBody(void *data, const char *buf, ssize_t len, StoreIOState::Pointer)
522 {
523 store_client *sc = (store_client *)data;
524 sc->readBody(buf, len);
525 }
526
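/// Parses the swap metadata (TLV) header read from disk, validates each
/// entry against this StoreEntry, and records swap_hdr_sz and object_sz
/// on the MemObject. Calls fail() on any inconsistency.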
527 void
528 store_client::unpackHeader(char const *buf, ssize_t len)
529 {
530 debugs(90, 3, "store_client::unpackHeader: len " << len << "");
531
532 if (len < 0) {
533 debugs(90, 3, "store_client::unpackHeader: " << xstrerror() << "");
534 fail();
535 return;
536 }
537
538 int swap_hdr_sz = 0;
539 StoreMetaUnpacker aBuilder(buf, len, &swap_hdr_sz);
540
541 if (!aBuilder.isBufferSane()) {
542 /* oops, bad disk file? */
543 debugs(90, DBG_IMPORTANT, "WARNING: swapfile header inconsistent with available data");
544 fail();
545 return;
546 }
547
548 tlv *tlv_list = aBuilder.createStoreMeta ();
549
550 if (tlv_list == NULL) {
551 debugs(90, DBG_IMPORTANT, "WARNING: failed to unpack meta data");
552 fail();
553 return;
554 }
555
556 /*
557 * Check the meta data and make sure we got the right object.
558 */
559 for (tlv *t = tlv_list; t; t = t->next) {
560 if (!t->checkConsistency(entry)) {
561 storeSwapTLVFree(tlv_list);
562 fail();
563 return;
564 }
565 }
566
567 storeSwapTLVFree(tlv_list);
568
569 assert(swap_hdr_sz >= 0);
570 entry->mem_obj->swap_hdr_sz = swap_hdr_sz;
571 if (entry->swap_file_sz > 0) { // collapsed hits may not know swap_file_sz
572 assert(entry->swap_file_sz >= static_cast<uint64_t>(swap_hdr_sz));
573 entry->mem_obj->object_sz = entry->swap_file_sz - swap_hdr_sz;
574 }
575 debugs(90, 5, "store_client::unpackHeader: swap_file_sz=" <<
576 entry->swap_file_sz << " (" << swap_hdr_sz << " + " <<
577 entry->mem_obj->object_sz << ")");
578 }
579
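/// Handles the first disk read, which starts with the swap metadata
/// header. After unpacking that header, any wanted body bytes delivered
/// by the same read are shifted to the front of the buffer and passed to
/// readBody(); otherwise another fileRead() is issued now that
/// swap_hdr_sz is known.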
580 void
581 store_client::readHeader(char const *buf, ssize_t len)
582 {
583 MemObject *const mem = entry->mem_obj;
584
585 assert(flags.disk_io_pending);
586 flags.disk_io_pending = false;
587 assert(_callback.pending());
588
589 unpackHeader (buf, len);
590
591 if (!object_ok)
592 return;
593
594 /*
595 * If our last read got some data the client wants, then give
596 * it to them, otherwise schedule another read.
597 */
598 size_t body_sz = len - mem->swap_hdr_sz;
599
600 if (copyInto.offset < static_cast<int64_t>(body_sz)) {
601 /*
602 * we have (part of) what they want
603 */
604 size_t copy_sz = min(copyInto.length, body_sz);
605 debugs(90, 3, "storeClientReadHeader: copying " << copy_sz << " bytes of body");
606 memmove(copyInto.data, copyInto.data + mem->swap_hdr_sz, copy_sz);
607
608 readBody(copyInto.data, copy_sz);
609
610 return;
611 }
612
613 /*
614 * we don't have what the client wants, but at least we now
615 * know the swap header size.
616 */
617 fileRead();
618 }
619
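/// Whether the given store client still has an unanswered copy request.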
620 int
621 storeClientCopyPending(store_client * sc, StoreEntry * e, void *data)
622 {
623 #if STORE_CLIENT_LIST_DEBUG
624 assert(sc == storeClientListSearch(e->mem_obj, data));
625 #endif
626 #ifndef SILLY_CODE
627
628 assert(sc);
629 #endif
630
631 assert(sc->entry == e);
632 #if SILLY_CODE
633
634 if (sc == NULL)
635 return 0;
636
637 #endif
638
639 if (!sc->_callback.pending())
640 return 0;
641
642 return 1;
643 }
644
645 /*
646 * This routine hasn't been optimised to take advantage of the
647 * passed sc. Yet.
648 */
649 int
650 storeUnregister(store_client * sc, StoreEntry * e, void *data)
651 {
652 MemObject *mem = e->mem_obj;
653 #if STORE_CLIENT_LIST_DEBUG
654
655 assert(sc == storeClientListSearch(e->mem_obj, data));
656 #endif
657
658 if (mem == NULL)
659 return 0;
660
661 debugs(90, 3, "storeUnregister: called for '" << e->getMD5Text() << "'");
662
663 if (sc == NULL) {
664 debugs(90, 3, "storeUnregister: No matching client for '" << e->getMD5Text() << "'");
665 return 0;
666 }
667
668 if (mem->clientCount() == 0) {
669 debugs(90, 3, "storeUnregister: Consistency failure - store client being unregistered is not in the mem object's list for '" << e->getMD5Text() << "'");
670 return 0;
671 }
672
673 dlinkDelete(&sc->node, &mem->clients);
674 -- mem->nclients;
675
676 if (e->store_status == STORE_OK && e->swap_status != SWAPOUT_DONE)
677 e->swapOut();
678
679 if (sc->swapin_sio != NULL) {
680 storeClose(sc->swapin_sio, StoreIOState::readerDone);
681 sc->swapin_sio = NULL;
682 ++statCounter.swap.ins;
683 }
684
685 if (sc->_callback.pending()) {
686 /* callback with ssize = -1 to indicate unexpected termination */
687 debugs(90, 3, "store_client for " << *e << " has a callback");
688 sc->fail();
689 }
690
691 #if STORE_CLIENT_LIST_DEBUG
692 cbdataReferenceDone(sc->owner);
693
694 #endif
695
696 delete sc;
697
698 assert(e->locked());
699 // An entry locked by others may be unlocked (and destructed) by others, so
700 // we must lock again to safely dereference e after CheckQuickAbort().
701 e->lock("storeUnregister");
702
703 if (mem->nclients == 0)
704 CheckQuickAbort(e);
705 else
706 mem->kickReads();
707
708 #if USE_ADAPTATION
709 e->kickProducer();
710 #endif
711
712 e->unlock("storeUnregister");
713 return 1;
714 }
715
716 /* Call handlers waiting for data to be appended to E. */
717 void
718 StoreEntry::invokeHandlers()
719 {
720 /* Commit what we can to disk, if appropriate */
721 swapOut();
722 int i = 0;
723 store_client *sc;
724 dlink_node *nx = NULL;
725 dlink_node *node;
726
727 PROF_start(InvokeHandlers);
728
729 debugs(90, 3, "InvokeHandlers: " << getMD5Text() );
730 /* walk the entire list looking for valid callbacks */
731
732 for (node = mem_obj->clients.head; node; node = nx) {
733 sc = (store_client *)node->data;
734 nx = node->next;
735 debugs(90, 3, "StoreEntry::InvokeHandlers: checking client #" << i );
736 ++i;
737
738 if (!sc->_callback.pending())
739 continue;
740
741 if (sc->flags.disk_io_pending)
742 continue;
743
744 storeClientCopy2(this, sc);
745 }
746 PROF_stop(InvokeHandlers);
747 }
748
749 // Does not account for remote readers/clients.
750 int
751 storePendingNClients(const StoreEntry * e)
752 {
753 MemObject *mem = e->mem_obj;
754 int npend = NULL == mem ? 0 : mem->nclients;
755 debugs(90, 3, "storePendingNClients: returning " << npend);
756 return npend;
757 }
758
759 /* return true if the request should be aborted */
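/* The not-yet-received size is compared against the quick_abort_min,
 * quick_abort_max (both in KB) and quick_abort_pct thresholds from squid.conf. */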
760 static bool
761 CheckQuickAbortIsReasonable(StoreEntry * entry)
762 {
763 MemObject * const mem = entry->mem_obj;
764 assert(mem);
765 debugs(90, 3, "entry=" << entry << ", mem=" << mem);
766
767 if (mem->request && !mem->request->flags.cachable) {
768 debugs(90, 3, "quick-abort? YES !mem->request->flags.cachable");
769 return true;
770 }
771
772 if (EBIT_TEST(entry->flags, KEY_PRIVATE)) {
773 debugs(90, 3, "quick-abort? YES KEY_PRIVATE");
774 return true;
775 }
776
777 int64_t expectlen = entry->getReply()->content_length + entry->getReply()->hdr_sz;
778
779 if (expectlen < 0) {
780 /* expectlen is < 0 if *no* information about the object has been received */
781 debugs(90, 3, "quick-abort? YES no object data received yet");
782 return true;
783 }
784
785 int64_t curlen = mem->endOffset();
786
787 if (Config.quickAbort.min < 0) {
788 debugs(90, 3, "quick-abort? NO disabled");
789 return false;
790 }
791
792 if (mem->request && mem->request->range && mem->request->getRangeOffsetLimit() < 0) {
793 /* Don't abort if the admin has configured range_offset_limit -1 to download fully for caching. */
794 debugs(90, 3, "quick-abort? NO admin configured range replies to full-download");
795 return false;
796 }
797
798 if (curlen > expectlen) {
799 debugs(90, 3, "quick-abort? YES bad content length (" << curlen << " of " << expectlen << " bytes received)");
800 return true;
801 }
802
803 if ((expectlen - curlen) < (Config.quickAbort.min << 10)) {
804 debugs(90, 3, "quick-abort? NO only a little more object left to receive");
805 return false;
806 }
807
808 if ((expectlen - curlen) > (Config.quickAbort.max << 10)) {
809 debugs(90, 3, "quick-abort? YES too much left to go");
810 return true;
811 }
812
813 if (expectlen < 100) {
814 debugs(90, 3, "quick-abort? NO avoid FPE");
815 return false;
816 }
817
818 if ((curlen / (expectlen / 100)) > (Config.quickAbort.pct)) {
819 debugs(90, 3, "quick-abort? NO past point of no return");
820 return false;
821 }
822
823 debugs(90, 3, "quick-abort? YES default");
824 return true;
825 }
826
827 /// Aborts a swapping-out entry if nobody needs it any more _and_
828 /// continuing swap out is not reasonable per CheckQuickAbortIsReasonable().
829 static void
830 CheckQuickAbort(StoreEntry * entry)
831 {
832 assert (entry);
833
834 if (storePendingNClients(entry) > 0)
835 return;
836
837 if (!shutting_down && Store::Root().transientReaders(*entry))
838 return;
839
840 if (entry->store_status != STORE_PENDING)
841 return;
842
843 if (EBIT_TEST(entry->flags, ENTRY_SPECIAL))
844 return;
845
846 if (!CheckQuickAbortIsReasonable(entry))
847 return;
848
849 entry->abort();
850 }
851
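/// Appends this client's copy offset, size and flag bits to the given
/// output buffer (skipped while a copy callback is still pending).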
852 void
853 store_client::dumpStats(MemBuf * output, int clientNumber) const
854 {
855 if (_callback.pending())
856 return;
857
858 output->appendf("\tClient #%d, %p\n", clientNumber, _callback.callback_data);
859 output->appendf("\t\tcopy_offset: %" PRId64 "\n", copyInto.offset);
860 output->appendf("\t\tcopy_size: %d\n", copyInto.length);
861 output->append("\t\tflags:", 8);
862
863 if (flags.disk_io_pending)
864 output->append(" disk_io_pending", 16);
865
866 if (flags.store_copying)
867 output->append(" store_copying", 14);
868
869 if (flags.copy_event_pending)
870 output->append(" copy_event_pending", 19);
871
872 output->append("\n",1);
873 }
874
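/// A copy request is outstanding while both the handler and its cbdata
/// are set; callback() resets both to NULL once the answer is delivered.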
875 bool
876 store_client::Callback::pending() const
877 {
878 return callback_handler && callback_data;
879 }
880
881 store_client::Callback::Callback(STCB *function, void *data) : callback_handler(function), callback_data (data) {}
882
883 #if USE_DELAY_POOLS
884 void
885 store_client::setDelayId(DelayId delay_id)
886 {
887 delayId = delay_id;
888 }
889 #endif
890