/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 15 Neighbor Routines */
12 #include "acl/FilledChecklist.h"
13 #include "anyp/PortCfg.h"
14 #include "base/EnumIterator.h"
15 #include "base/IoManip.h"
16 #include "CacheDigest.h"
17 #include "CachePeer.h"
18 #include "comm/Connection.h"
19 #include "comm/ConnOpener.h"
20 #include "debug/Messages.h"
25 #include "HttpRequest.h"
26 #include "icmp/net_db.h"
29 #include "ip/Address.h"
32 #include "MemObject.h"
33 #include "mgr/Registration.h"
34 #include "multicast.h"
35 #include "neighbors.h"
36 #include "NeighborTypeDomainList.h"
38 #include "PeerDigest.h"
39 #include "PeerPoolMgr.h"
40 #include "PeerSelectState.h"
41 #include "RequestFlags.h"
42 #include "SquidConfig.h"
43 #include "SquidMath.h"
46 #include "store_key_md5.h"
49 /* count mcast group peers every 15 minutes */
50 #define MCAST_COUNT_RATE 900
52 bool peerAllowedToUse(const CachePeer
*, PeerSelector
*);
53 static int peerWouldBePinged(const CachePeer
*, PeerSelector
*);
54 static void neighborRemove(CachePeer
*);
55 static void neighborAlive(CachePeer
*, const MemObject
*, const icp_common_t
*);
57 static void neighborAliveHtcp(CachePeer
*, const MemObject
*, const HtcpReplyData
*);
59 static void neighborCountIgnored(CachePeer
*);
60 static void peerRefreshDNS(void *);
61 static IPH peerDNSConfigure
;
62 static void peerProbeConnect(CachePeer
*, const bool reprobeIfBusy
= false);
63 static CNCB peerProbeConnectDone
;
64 static void peerCountMcastPeersDone(void *data
);
65 static void peerCountMcastPeersStart(void *data
);
66 static void peerCountMcastPeersSchedule(CachePeer
* p
, time_t when
);
67 static void peerCountMcastPeersAbort(PeerSelector
*);
68 static void peerCountMcastPeersCreateAndSend(CachePeer
*p
);
69 static IRCB peerCountHandleIcpReply
;
71 static void neighborIgnoreNonPeer(const Ip::Address
&, icp_opcode
);
72 static OBJH neighborDumpPeers
;
73 static void dump_peers(StoreEntry
* sentry
, CachePeer
* peers
);
75 static unsigned short echo_port
;
77 static int NLateReplies
= 0;
78 static CachePeer
*first_ping
= nullptr;
81 neighborTypeStr(const CachePeer
* p
)
83 if (p
->type
== PEER_NONE
)
86 if (p
->type
== PEER_SIBLING
)
89 if (p
->type
== PEER_MULTICAST
)
90 return "Multicast Group";
96 whichPeer(const Ip::Address
&from
)
100 CachePeer
*p
= nullptr;
101 debugs(15, 3, "whichPeer: from " << from
);
103 for (p
= Config
.peers
; p
; p
= p
->next
) {
104 for (j
= 0; j
< p
->n_addresses
; ++j
) {
105 if (from
== p
->addresses
[j
] && from
.port() == p
->icp
.port
) {
115 neighborType(const CachePeer
* p
, const AnyP::Uri
&url
)
118 const NeighborTypeDomainList
*d
= nullptr;
120 for (d
= p
->typelist
; d
; d
= d
->next
) {
121 if (0 == matchDomainName(url
.host(), d
->domain
))
122 if (d
->type
!= PEER_NONE
)
125 #if PEER_MULTICAST_SIBLINGS
126 if (p
->type
== PEER_MULTICAST
)
127 if (p
->options
.mcast_siblings
)
135 * \return Whether it is appropriate to fetch REQUEST from PEER.
138 peerAllowedToUse(const CachePeer
* p
, PeerSelector
* ps
)
141 HttpRequest
*request
= ps
->request
;
142 assert(request
!= nullptr);
144 if (neighborType(p
, request
->url
) == PEER_SIBLING
) {
145 #if PEER_MULTICAST_SIBLINGS
146 if (p
->type
== PEER_MULTICAST
&& p
->options
.mcast_siblings
&&
147 (request
->flags
.noCache
|| request
->flags
.refresh
|| request
->flags
.loopDetected
|| request
->flags
.needValidation
))
148 debugs(15, 2, "multicast-siblings optimization match for " << *p
<< ", " << request
->url
.authority());
150 if (request
->flags
.noCache
)
153 if (request
->flags
.refresh
)
156 if (request
->flags
.loopDetected
)
159 if (request
->flags
.needValidation
)
163 // CONNECT requests are proxy requests. Not to be forwarded to origin servers.
164 // Unless the destination port matches, in which case we MAY perform a 'DIRECT' to this CachePeer.
165 if (p
->options
.originserver
&& request
->method
== Http::METHOD_CONNECT
&& request
->url
.port() != p
->http_port
)
168 if (p
->access
== nullptr)
171 ACLFilledChecklist
checklist(p
->access
, request
, nullptr);
172 checklist
.al
= ps
->al
;
173 if (ps
->al
&& ps
->al
->reply
) {
174 checklist
.reply
= ps
->al
->reply
.getRaw();
175 HTTPMSGLOCK(checklist
.reply
);
177 checklist
.syncAle(request
, nullptr);
178 return checklist
.fastCheck().allowed();
181 /* Return TRUE if it is okay to send an ICP request to this CachePeer. */
183 peerWouldBePinged(const CachePeer
* p
, PeerSelector
* ps
)
186 HttpRequest
*request
= ps
->request
;
188 if (p
->icp
.port
== 0)
191 if (p
->options
.no_query
)
194 if (p
->options
.mcast_responder
)
197 if (p
->n_addresses
== 0)
200 if (p
->options
.background_ping
&& (squid_curtime
- p
->stats
.last_query
< Config
.backgroundPingRate
))
203 /* the case below seems strange, but can happen if the
204 * URL host is on the other side of a firewall */
205 if (p
->type
== PEER_SIBLING
)
206 if (!request
->flags
.hierarchical
)
209 if (!peerAllowedToUse(p
, ps
))
212 /* Ping dead peers every timeout interval */
213 if (squid_curtime
- p
->stats
.last_query
> Config
.Timeout
.deadPeer
)
223 peerCanOpenMore(const CachePeer
*p
)
225 const int effectiveLimit
= p
->max_conn
<= 0 ? Squid_MaxFD
: p
->max_conn
;
226 const int remaining
= effectiveLimit
- p
->stats
.conn_open
;
227 debugs(15, 7, remaining
<< '=' << effectiveLimit
<< '-' << p
->stats
.conn_open
);
228 return remaining
> 0;
232 peerHasConnAvailable(const CachePeer
*p
)
234 // Standby connections can be used without opening new connections.
235 const int standbys
= p
->standby
.pool
? p
->standby
.pool
->count() : 0;
237 // XXX: Some idle pconns can be used without opening new connections.
238 // Complication: Idle pconns cannot be reused for some requests.
239 const int usableIdles
= 0;
241 const int available
= standbys
+ usableIdles
;
242 debugs(15, 7, available
<< '=' << standbys
<< '+' << usableIdles
);
243 return available
> 0;
247 peerConnClosed(CachePeer
*p
)
249 --p
->stats
.conn_open
;
250 if (p
->standby
.waitingForClose
&& peerCanOpenMore(p
)) {
251 p
->standby
.waitingForClose
= false;
252 PeerPoolMgr::Checkpoint(p
->standby
.mgr
, "conn closed");
256 /* Return TRUE if it is okay to send an HTTP request to this CachePeer. */
258 peerHTTPOkay(const CachePeer
* p
, PeerSelector
* ps
)
260 if (!peerCanOpenMore(p
) && !peerHasConnAvailable(p
))
263 if (!peerAllowedToUse(p
, ps
))
273 neighborsCount(PeerSelector
*ps
)
275 CachePeer
*p
= nullptr;
278 for (p
= Config
.peers
; p
; p
= p
->next
)
279 if (peerWouldBePinged(p
, ps
))
282 debugs(15, 3, "neighborsCount: " << count
);
288 getFirstUpParent(PeerSelector
*ps
)
291 HttpRequest
*request
= ps
->request
;
293 CachePeer
*p
= nullptr;
295 for (p
= Config
.peers
; p
; p
= p
->next
) {
299 if (neighborType(p
, request
->url
) != PEER_PARENT
)
302 if (!peerHTTPOkay(p
, ps
))
308 debugs(15, 3, "returning " << RawPointer(p
).orNil());
313 getRoundRobinParent(PeerSelector
*ps
)
316 HttpRequest
*request
= ps
->request
;
319 CachePeer
*q
= nullptr;
321 for (p
= Config
.peers
; p
; p
= p
->next
) {
322 if (!p
->options
.roundrobin
)
325 if (neighborType(p
, request
->url
) != PEER_PARENT
)
328 if (!peerHTTPOkay(p
, ps
))
335 if (p
->weight
== q
->weight
) {
336 if (q
->rr_count
< p
->rr_count
)
338 } else if ( ((double) q
->rr_count
/ q
->weight
) < ((double) p
->rr_count
/ p
->weight
)) {
349 debugs(15, 3, "returning " << RawPointer(q
).orNil());
355 getWeightedRoundRobinParent(PeerSelector
*ps
)
358 HttpRequest
*request
= ps
->request
;
361 CachePeer
*q
= nullptr;
364 for (p
= Config
.peers
; p
; p
= p
->next
) {
365 if (!p
->options
.weighted_roundrobin
)
368 if (neighborType(p
, request
->url
) != PEER_PARENT
)
371 if (!peerHTTPOkay(p
, ps
))
374 if (q
&& q
->rr_count
< p
->rr_count
)
380 if (q
&& q
->rr_count
> 1000000)
381 for (p
= Config
.peers
; p
; p
= p
->next
) {
382 if (!p
->options
.weighted_roundrobin
)
385 if (neighborType(p
, request
->url
) != PEER_PARENT
)
392 weighted_rtt
= (q
->stats
.rtt
- q
->basetime
) / q
->weight
;
394 if (weighted_rtt
< 1)
397 q
->rr_count
+= weighted_rtt
;
399 debugs(15, 3, "getWeightedRoundRobinParent: weighted_rtt " << weighted_rtt
);
402 debugs(15, 3, "returning " << RawPointer(q
).orNil());
/**
 * This gets called every 5 minutes to clear the round-robin counter.
 * The exact timing is an arbitrary default, set on estimated timing of a
 * large number of requests in a high-performance environment during the
 * period. The larger the number of requests between cycled resets the
 * more balanced the operations.
 *
 * TODO: Make the reset timing a selectable parameter in squid.conf
 */
418 peerClearRRLoop(void *data
)
421 eventAdd("peerClearRR", peerClearRRLoop
, data
, 5 * 60.0, 0);
/**
 * This gets called on startup and restart to kick off the CachePeer round-robin
 * maintenance event. It ensures that no matter how many times it's called
 * no more than one event is scheduled.
 */
430 peerClearRRStart(void)
432 static bool event_added
= false;
434 peerClearRRLoop(nullptr);
/**
 * Called whenever the round-robin counters need to be reset to a sane state.
 * So far those times are:
 *  - On startup and reconfigure - to set the counters to sane initial settings.
 *  - When a CachePeer has revived from dead, to prevent the revived CachePeer
 *    being flooded with requests which it has 'missed' during the down period.
 */
449 CachePeer
*p
= nullptr;
450 for (p
= Config
.peers
; p
; p
= p
->next
) {
456 peerAlive(CachePeer
*p
)
458 if (p
->stats
.logged_state
== PEER_DEAD
&& p
->tcp_up
) {
459 debugs(15, DBG_IMPORTANT
, "Detected REVIVED " << neighborTypeStr(p
) << ": " << *p
);
460 p
->stats
.logged_state
= PEER_ALIVE
;
462 if (p
->standby
.mgr
.valid())
463 PeerPoolMgr::Checkpoint(p
->standby
.mgr
, "revived peer");
466 p
->stats
.last_reply
= squid_curtime
;
467 p
->stats
.probe_start
= 0;
469 // TODO: Remove or explain how we could detect an alive peer without IP addresses
471 ipcache_nbgethostbyname(p
->host
, peerDNSConfigure
, p
);
475 getDefaultParent(PeerSelector
*ps
)
478 HttpRequest
*request
= ps
->request
;
480 CachePeer
*p
= nullptr;
482 for (p
= Config
.peers
; p
; p
= p
->next
) {
483 if (neighborType(p
, request
->url
) != PEER_PARENT
)
486 if (!p
->options
.default_parent
)
489 if (!peerHTTPOkay(p
, ps
))
492 debugs(15, 3, "returning " << *p
);
497 // TODO: Refactor similar get*() functions to use our return/reporting style
498 debugs(15, 3, "none found");
503 getNextPeer(CachePeer
* p
)
515 neighborRemove(CachePeer
* target
)
517 CachePeer
*p
= nullptr;
518 CachePeer
**P
= nullptr;
538 first_ping
= Config
.peers
;
542 neighborsRegisterWithCacheManager()
544 Mgr::RegisterAction("server_list",
545 "Peer Cache Statistics",
546 neighborDumpPeers
, 0, 1);
552 struct servent
*sep
= nullptr;
553 const char *me
= getMyHostname();
554 CachePeer
*thisPeer
= nullptr;
555 CachePeer
*next
= nullptr;
557 neighborsRegisterWithCacheManager();
559 if (Comm::IsConnOpen(icpIncomingConn
)) {
561 for (thisPeer
= Config
.peers
; thisPeer
; thisPeer
= next
) {
562 next
= thisPeer
->next
;
564 if (0 != strcmp(thisPeer
->host
, me
))
567 for (AnyP::PortCfgPointer s
= HttpPortList
; s
!= nullptr; s
= s
->next
) {
568 if (thisPeer
->http_port
!= s
->s
.port())
571 debugs(15, DBG_IMPORTANT
, "WARNING: Peer looks like this host." <<
572 Debug::Extra
<< "Ignoring cache_peer " << *thisPeer
);
574 neighborRemove(thisPeer
);
579 peerRefreshDNS((void *) 1);
581 sep
= getservbyname("echo", "udp");
582 echo_port
= sep
? ntohs((unsigned short) sep
->s_port
) : 7;
584 first_ping
= Config
.peers
;
588 neighborsUdpPing(HttpRequest
* request
,
595 const char *url
= entry
->url();
596 MemObject
*mem
= entry
->mem_obj
;
597 CachePeer
*p
= nullptr;
601 int peers_pinged
= 0;
602 int parent_timeout
= 0, parent_exprep
= 0;
603 int sibling_timeout
= 0, sibling_exprep
= 0;
604 int mcast_timeout
= 0, mcast_exprep
= 0;
606 if (Config
.peers
== nullptr)
609 assert(!entry
->hasDisk());
611 mem
->start_ping
= current_time
;
613 mem
->ping_reply_callback
= callback
;
617 reqnum
= icpSetCacheKey((const cache_key
*)entry
->key
);
619 for (i
= 0, p
= first_ping
; i
++ < Config
.npeers
; p
= p
->next
) {
623 debugs(15, 5, "candidate: " << *p
);
625 if (!peerWouldBePinged(p
, ps
))
626 continue; /* next CachePeer */
630 debugs(15, 4, "pinging cache_peer " << *p
<< " for '" << url
<< "'");
632 debugs(15, 3, "neighborsUdpPing: key = '" << entry
->getMD5Text() << "'");
634 debugs(15, 3, "neighborsUdpPing: reqnum = " << reqnum
);
637 if (p
->options
.htcp
&& !p
->options
.htcp_only_clr
) {
638 if (Config
.Port
.htcp
<= 0) {
639 debugs(15, DBG_CRITICAL
, "ERROR: HTCP is disabled! Cannot send HTCP request to peer.");
643 debugs(15, 3, "neighborsUdpPing: sending HTCP query");
644 if (htcpQuery(entry
, request
, p
) <= 0)
645 continue; // unable to send.
649 if (Config
.Port
.icp
<= 0 || !Comm::IsConnOpen(icpOutgoingConn
)) {
650 debugs(15, DBG_CRITICAL
, "ERROR: ICP is disabled! Cannot send ICP request to peer.");
654 if (p
->type
== PEER_MULTICAST
)
655 mcastSetTtl(icpOutgoingConn
->fd
, p
->mcast
.ttl
);
657 if (p
->icp
.port
== echo_port
) {
658 debugs(15, 4, "neighborsUdpPing: Looks like a dumb cache, send DECHO ping");
659 // TODO: Get ALE from callback_data if possible.
660 icpCreateAndSend(ICP_DECHO
, 0, url
, reqnum
, 0,
661 icpOutgoingConn
->fd
, p
->in_addr
, nullptr);
665 if (Config
.onoff
.query_icmp
)
666 if (p
->icp
.version
== ICP_VERSION_2
)
667 flags
|= ICP_FLAG_SRC_RTT
;
669 // TODO: Get ALE from callback_data if possible.
670 icpCreateAndSend(ICP_QUERY
, flags
, url
, reqnum
, 0,
671 icpOutgoingConn
->fd
, p
->in_addr
, nullptr);
676 ++ p
->stats
.pings_sent
;
678 if (p
->type
== PEER_MULTICAST
) {
679 mcast_exprep
+= p
->mcast
.n_replies_expected
;
680 mcast_timeout
+= (p
->stats
.rtt
* p
->mcast
.n_replies_expected
);
681 } else if (neighborUp(p
)) {
682 /* its alive, expect a reply from it */
684 if (neighborType(p
, request
->url
) == PEER_PARENT
) {
686 parent_timeout
+= p
->stats
.rtt
;
689 sibling_timeout
+= p
->stats
.rtt
;
692 /* Neighbor is dead; ping it anyway, but don't expect a reply */
693 /* log it once at the threshold */
695 if (p
->stats
.logged_state
== PEER_ALIVE
) {
696 debugs(15, DBG_IMPORTANT
, "Detected DEAD " << neighborTypeStr(p
) << ": " << *p
);
697 p
->stats
.logged_state
= PEER_DEAD
;
701 p
->stats
.last_query
= squid_curtime
;
704 * keep probe_start == 0 for a multicast CachePeer,
705 * so neighborUp() never says this CachePeer is dead.
708 if ((p
->type
!= PEER_MULTICAST
) && (p
->stats
.probe_start
== 0))
709 p
->stats
.probe_start
= squid_curtime
;
712 if ((first_ping
= first_ping
->next
) == nullptr)
713 first_ping
= Config
.peers
;
716 * How many replies to expect?
718 *exprep
= parent_exprep
+ sibling_exprep
+ mcast_exprep
;
721 * If there is a configured timeout, use it
723 if (Config
.Timeout
.icp_query
)
724 *timeout
= Config
.Timeout
.icp_query
;
728 *timeout
= 2 * parent_timeout
/ parent_exprep
;
729 else if (mcast_exprep
)
730 *timeout
= 2 * mcast_timeout
/ mcast_exprep
;
732 *timeout
= 2 * sibling_timeout
/ sibling_exprep
;
734 *timeout
= 2000; /* 2 seconds */
736 if (Config
.Timeout
.icp_query_max
)
737 if (*timeout
> Config
.Timeout
.icp_query_max
)
738 *timeout
= Config
.Timeout
.icp_query_max
;
740 if (*timeout
< Config
.Timeout
.icp_query_min
)
741 *timeout
= Config
.Timeout
.icp_query_min
;
747 /* lookup the digest of a given CachePeer */
749 peerDigestLookup(CachePeer
* p
, PeerSelector
* ps
)
751 #if USE_CACHE_DIGESTS
753 HttpRequest
*request
= ps
->request
;
754 const cache_key
*key
= request
? storeKeyPublicByRequest(request
) : nullptr;
757 debugs(15, 5, "cache_peer " << *p
);
758 /* does the peeer have a valid digest? */
761 debugs(15, 5, "peerDigestLookup: gone!");
763 } else if (!peerHTTPOkay(p
, ps
)) {
764 debugs(15, 5, "peerDigestLookup: !peerHTTPOkay");
766 } else if (!p
->digest
->flags
.needed
) {
767 debugs(15, 5, "peerDigestLookup: note need");
768 peerDigestNeeded(p
->digest
);
770 } else if (!p
->digest
->flags
.usable
) {
771 debugs(15, 5, "peerDigestLookup: !ready && " << (p
->digest
->flags
.requested
? "" : "!") << "requested");
775 debugs(15, 5, "OK to lookup cache_peer " << *p
);
776 assert(p
->digest
->cd
);
777 /* does digest predict a hit? */
779 if (!p
->digest
->cd
->contains(key
))
782 debugs(15, 5, "HIT for cache_peer " << *p
);
793 /* select best CachePeer based on cache digests */
795 neighborsDigestSelect(PeerSelector
*ps
)
797 CachePeer
*best_p
= nullptr;
798 #if USE_CACHE_DIGESTS
800 HttpRequest
*request
= ps
->request
;
803 int choice_count
= 0;
804 int ichoice_count
= 0;
809 if (!request
->flags
.hierarchical
)
812 storeKeyPublicByRequest(request
);
814 for (i
= 0, p
= first_ping
; i
++ < Config
.npeers
; p
= p
->next
) {
823 lookup
= peerDigestLookup(p
, ps
);
825 if (lookup
== LOOKUP_NONE
)
830 if (lookup
== LOOKUP_MISS
)
833 p_rtt
= netdbHostRtt(p
->host
);
835 debugs(15, 5, "cache_peer " << *p
<< " rtt: " << p_rtt
);
837 /* is this CachePeer better than others in terms of rtt ? */
838 if (!best_p
|| (p_rtt
&& p_rtt
< best_rtt
)) {
842 if (p_rtt
) /* informative choice (aka educated guess) */
845 debugs(15, 4, "cache_peer " << *p
<< " leads with rtt " << best_rtt
);
849 debugs(15, 4, "neighborsDigestSelect: choices: " << choice_count
<< " (" << ichoice_count
<< ")");
850 peerNoteDigestLookup(request
, best_p
,
851 best_p
? LOOKUP_HIT
: (choice_count
? LOOKUP_MISS
: LOOKUP_NONE
));
852 request
->hier
.n_choices
= choice_count
;
853 request
->hier
.n_ichoices
= ichoice_count
;
862 peerNoteDigestLookup(HttpRequest
* request
, CachePeer
* p
, lookup_t lookup
)
864 #if USE_CACHE_DIGESTS
866 strncpy(request
->hier
.cd_host
, p
->host
, sizeof(request
->hier
.cd_host
)-1);
868 *request
->hier
.cd_host
= '\0';
870 request
->hier
.cd_lookup
= lookup
;
871 debugs(15, 4, "cache_peer " << RawPointer(p
).orNil() << ", lookup: " << lookup_t_str
[lookup
]);
880 neighborAlive(CachePeer
* p
, const MemObject
*, const icp_common_t
* header
)
883 ++ p
->stats
.pings_acked
;
885 if ((icp_opcode
) header
->opcode
<= ICP_END
)
886 ++ p
->icp
.counts
[header
->opcode
];
888 p
->icp
.version
= (int) header
->version
;
892 neighborUpdateRtt(CachePeer
* p
, MemObject
* mem
)
894 int rtt
, rtt_av_factor
;
899 if (!mem
->start_ping
.tv_sec
)
902 rtt
= tvSubMsec(mem
->start_ping
, current_time
);
904 if (rtt
< 1 || rtt
> 10000)
907 rtt_av_factor
= RTT_AV_FACTOR
;
909 if (p
->options
.weighted_roundrobin
)
910 rtt_av_factor
= RTT_BACKGROUND_AV_FACTOR
;
912 p
->stats
.rtt
= Math::intAverage(p
->stats
.rtt
, rtt
, p
->stats
.pings_acked
, rtt_av_factor
);
917 neighborAliveHtcp(CachePeer
* p
, const MemObject
*, const HtcpReplyData
* htcp
)
920 ++ p
->stats
.pings_acked
;
921 ++ p
->htcp
.counts
[htcp
->hit
? 1 : 0];
922 p
->htcp
.version
= htcp
->version
;
928 neighborCountIgnored(CachePeer
* p
)
933 ++ p
->stats
.ignored_replies
;
939 neighborIgnoreNonPeer(const Ip::Address
&from
, icp_opcode opcode
)
941 static uint64_t ignoredReplies
= 0;
942 if (isPowTen(++ignoredReplies
)) {
943 debugs(15, DBG_IMPORTANT
, "WARNING: Ignored " << ignoredReplies
<< " ICP replies from non-peers" <<
944 Debug::Extra
<< "last seen non-peer source address: " << from
<<
945 Debug::Extra
<< "last seen ICP reply opcode: " << icp_opcode_str
[opcode
]);
949 /* ignoreMulticastReply
951 * * We want to ignore replies from multicast peers if the
952 * * cache_host_domain rules would normally prevent the CachePeer
956 ignoreMulticastReply(CachePeer
* p
, PeerSelector
* ps
)
961 if (!p
->options
.mcast_responder
)
964 if (peerHTTPOkay(p
, ps
))
971 * I should attach these records to the entry. We take the first
972 * hit we get our wait until everyone misses. The timeout handler
973 * call needs to nip this shopping list or call one of the misses.
975 * If a hit process is already started, then sobeit
978 neighborsUdpAck(const cache_key
* key
, icp_common_t
* header
, const Ip::Address
&from
)
980 CachePeer
*p
= nullptr;
982 MemObject
*mem
= nullptr;
983 peer_t ntype
= PEER_NONE
;
984 icp_opcode opcode
= (icp_opcode
) header
->opcode
;
986 debugs(15, 6, "neighborsUdpAck: opcode " << opcode
<< " '" << storeKeyText(key
) << "'");
988 if ((entry
= Store::Root().findCallbackXXX(key
)))
989 mem
= entry
->mem_obj
;
991 if ((p
= whichPeer(from
)))
992 neighborAlive(p
, mem
, header
);
994 if (opcode
> ICP_END
)
997 const char *opcode_d
= icp_opcode_str
[opcode
];
1000 neighborUpdateRtt(p
, mem
);
1002 /* Does the entry exist? */
1003 if (nullptr == entry
) {
1004 debugs(12, 3, "neighborsUdpAck: Cache key '" << storeKeyText(key
) << "' not found");
1005 neighborCountIgnored(p
);
1009 /* check if someone is already fetching it */
1010 if (EBIT_TEST(entry
->flags
, ENTRY_DISPATCHED
)) {
1011 debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key
) << "' already being fetched.");
1012 neighborCountIgnored(p
);
1016 if (mem
== nullptr) {
1017 debugs(15, 2, "Ignoring " << opcode_d
<< " for missing mem_obj: " << storeKeyText(key
));
1018 neighborCountIgnored(p
);
1022 if (entry
->ping_status
!= PING_WAITING
) {
1023 debugs(15, 2, "neighborsUdpAck: Late " << opcode_d
<< " for " << storeKeyText(key
));
1024 neighborCountIgnored(p
);
1028 if (!entry
->locked()) {
1029 // TODO: many entries are unlocked; why is this reported at level 1?
1030 debugs(12, DBG_IMPORTANT
, "neighborsUdpAck: '" << storeKeyText(key
) << "' has no locks");
1031 neighborCountIgnored(p
);
1035 if (!mem
->ircb_data
) {
1036 debugs(12, DBG_IMPORTANT
, "ERROR: Squid BUG: missing ICP callback data for " << *entry
);
1037 neighborCountIgnored(p
);
1041 debugs(15, 3, opcode_d
<< " for " << storeKeyText(key
) << " from " << RawPointer(p
).orNil("source"));
1044 ntype
= neighborType(p
, mem
->request
->url
);
1047 if (ignoreMulticastReply(p
, mem
->ircb_data
)) {
1048 neighborCountIgnored(p
);
1049 } else if (opcode
== ICP_MISS
) {
1051 neighborIgnoreNonPeer(from
, opcode
);
1053 mem
->ping_reply_callback(p
, ntype
, AnyP::PROTO_ICP
, header
, mem
->ircb_data
);
1055 } else if (opcode
== ICP_HIT
) {
1057 neighborIgnoreNonPeer(from
, opcode
);
1059 header
->opcode
= ICP_HIT
;
1060 mem
->ping_reply_callback(p
, ntype
, AnyP::PROTO_ICP
, header
, mem
->ircb_data
);
1062 } else if (opcode
== ICP_DECHO
) {
1064 neighborIgnoreNonPeer(from
, opcode
);
1065 } else if (ntype
== PEER_SIBLING
) {
1066 debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n");
1067 debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n");
1069 mem
->ping_reply_callback(p
, ntype
, AnyP::PROTO_ICP
, header
, mem
->ircb_data
);
1071 } else if (opcode
== ICP_SECHO
) {
1073 debugs(15, DBG_IMPORTANT
, "Ignoring SECHO from neighbor " << *p
);
1074 neighborCountIgnored(p
);
1076 debugs(15, DBG_IMPORTANT
, "Unsolicited SECHO from " << from
);
1078 } else if (opcode
== ICP_DENIED
) {
1080 neighborIgnoreNonPeer(from
, opcode
);
1081 } else if (p
->stats
.pings_acked
> 100) {
1082 if (100 * p
->icp
.counts
[ICP_DENIED
] / p
->stats
.pings_acked
> 95) {
1083 debugs(15, DBG_CRITICAL
, "Disabling cache_peer " << *p
<<
1084 " because over 95% of its replies are UDP_DENIED");
1088 neighborCountIgnored(p
);
1091 } else if (opcode
== ICP_MISS_NOFETCH
) {
1092 mem
->ping_reply_callback(p
, ntype
, AnyP::PROTO_ICP
, header
, mem
->ircb_data
);
1094 debugs(15, DBG_CRITICAL
, "ERROR: neighborsUdpAck: Unexpected ICP reply: " << opcode_d
);
1099 findCachePeerByName(const char * const name
)
1101 CachePeer
*p
= nullptr;
1103 for (p
= Config
.peers
; p
; p
= p
->next
) {
1104 if (!strcasecmp(name
, p
->name
))
1112 neighborUp(const CachePeer
* p
)
1115 // TODO: When CachePeer gets its own CodeContext, pass that context instead of nullptr
1116 CallService(nullptr, [&] {
1117 peerProbeConnect(const_cast<CachePeer
*>(p
));
1123 * The CachePeer can not be UP if we don't have any IP addresses
1126 if (0 == p
->n_addresses
) {
1127 debugs(15, 8, "DOWN (no-ip): " << *p
);
1131 if (p
->options
.no_query
) {
1132 debugs(15, 8, "UP (no-query): " << *p
);
1136 if (p
->stats
.probe_start
!= 0 &&
1137 squid_curtime
- p
->stats
.probe_start
> Config
.Timeout
.deadPeer
) {
1138 debugs(15, 8, "DOWN (dead): " << *p
);
1142 debugs(15, 8, "UP: " << *p
);
/// Clamps a timeout value to be strictly positive: any timeout below one
/// second becomes one second, so callers never schedule a zero or negative
/// timeout.
static time_t
positiveTimeout(const time_t timeout)
{
    // std::max qualified explicitly to avoid relying on an unqualified lookup
    return std::max(static_cast<time_t>(1), timeout);
}
1153 peerDNSConfigure(const ipcache_addrs
*ia
, const Dns::LookupDetails
&, void *data
)
1155 // TODO: connections to no-longer valid IP addresses should be
1156 // closed when we can detect such IP addresses.
1158 CachePeer
*p
= (CachePeer
*)data
;
1160 if (p
->n_addresses
== 0) {
1161 debugs(15, Important(29), "Configuring " << neighborTypeStr(p
) << " " << *p
);
1163 if (p
->type
== PEER_MULTICAST
)
1164 debugs(15, DBG_IMPORTANT
, " Multicast TTL = " << p
->mcast
.ttl
);
1169 if (ia
== nullptr) {
1170 debugs(0, DBG_CRITICAL
, "WARNING: DNS lookup for '" << *p
<< "' failed!");
1175 debugs(0, DBG_CRITICAL
, "WARNING: No IP address found for '" << *p
<< "'!");
1179 for (const auto &ip
: ia
->goodAndBad()) { // TODO: Consider using just good().
1180 if (p
->n_addresses
< PEER_MAX_ADDRESSES
) {
1181 const auto idx
= p
->n_addresses
++;
1182 p
->addresses
[idx
] = ip
;
1183 debugs(15, 2, "--> IP address #" << idx
<< ": " << p
->addresses
[idx
]);
1185 debugs(15, 3, "ignoring remaining " << (ia
->size() - p
->n_addresses
) << " ips");
1190 p
->in_addr
.setEmpty();
1191 p
->in_addr
= p
->addresses
[0];
1192 p
->in_addr
.port(p
->icp
.port
);
1194 peerProbeConnect(p
, true); // detect any died or revived peers ASAP
1196 if (p
->type
== PEER_MULTICAST
)
1197 peerCountMcastPeersSchedule(p
, 10);
1200 if (p
->type
!= PEER_MULTICAST
&& IamWorkerProcess())
1201 if (!p
->options
.no_netdb_exchange
)
1202 eventAddIsh("netdbExchangeStart", netdbExchangeStart
, p
, 30.0, 1);
1205 if (p
->standby
.mgr
.valid())
1206 PeerPoolMgr::Checkpoint(p
->standby
.mgr
, "resolved peer");
1210 peerRefreshDNS(void *data
)
1212 CachePeer
*p
= nullptr;
1214 if (eventFind(peerRefreshDNS
, nullptr))
1215 eventDelete(peerRefreshDNS
, nullptr);
1217 if (!data
&& 0 == stat5minClientRequests()) {
1218 /* no recent client traffic, wait a bit */
1219 eventAddIsh("peerRefreshDNS", peerRefreshDNS
, nullptr, 180.0, 1);
1223 for (p
= Config
.peers
; p
; p
= p
->next
)
1224 ipcache_nbgethostbyname(p
->host
, peerDNSConfigure
, p
);
1226 /* Reconfigure the peers every hour */
1227 eventAddIsh("peerRefreshDNS", peerRefreshDNS
, nullptr, 3600.0, 1);
1230 /// whether new TCP probes are currently banned
1232 peerProbeIsBusy(const CachePeer
*p
)
1234 if (p
->testing_now
> 0) {
1235 debugs(15, 8, "yes, probing " << p
);
1238 if (squid_curtime
- p
->stats
.last_connect_probe
== 0) {
1239 debugs(15, 8, "yes, just probed " << p
);
1245 * peerProbeConnect will be called on dead peers by neighborUp
1248 peerProbeConnect(CachePeer
*p
, const bool reprobeIfBusy
)
1250 if (peerProbeIsBusy(p
)) {
1251 p
->reprobe
= reprobeIfBusy
;
1256 const auto ctimeout
= p
->connectTimeout();
1257 /* for each IP address of this CachePeer. find one that we can connect to and probe it. */
1258 for (int i
= 0; i
< p
->n_addresses
; ++i
) {
1259 Comm::ConnectionPointer conn
= new Comm::Connection
;
1260 conn
->remote
= p
->addresses
[i
];
1261 conn
->remote
.port(p
->http_port
);
1263 getOutgoingAddress(nullptr, conn
);
1267 AsyncCall::Pointer call
= commCbCall(15,3, "peerProbeConnectDone", CommConnectCbPtrFun(peerProbeConnectDone
, p
));
1268 Comm::ConnOpener
*cs
= new Comm::ConnOpener(conn
, call
, ctimeout
);
1269 cs
->setHost(p
->host
);
1270 AsyncJob::Start(cs
);
1273 p
->stats
.last_connect_probe
= squid_curtime
;
1277 peerProbeConnectDone(const Comm::ConnectionPointer
&conn
, Comm::Flag status
, int, void *data
)
1279 CachePeer
*p
= (CachePeer
*)data
;
1281 if (status
== Comm::OK
)
1284 p
->noteFailure(Http::scNone
);
1288 // TODO: log this traffic.
1291 peerProbeConnect(p
);
1295 peerCountMcastPeersSchedule(CachePeer
* p
, time_t when
)
1297 if (p
->mcast
.flags
.count_event_pending
)
1300 eventAdd("peerCountMcastPeersStart",
1301 peerCountMcastPeersStart
,
1305 p
->mcast
.flags
.count_event_pending
= true;
1309 peerCountMcastPeersStart(void *data
)
1311 const auto peer
= static_cast<CachePeer
*>(data
);
1312 CallContextCreator([peer
] {
1313 peerCountMcastPeersCreateAndSend(peer
);
1315 peerCountMcastPeersSchedule(peer
, MCAST_COUNT_RATE
);
1318 /// initiates an ICP transaction to a multicast peer
1320 peerCountMcastPeersCreateAndSend(CachePeer
* const p
)
1322 // XXX: Do not create lots of complex fake objects (while abusing their
1323 // APIs) to pass around a few basic data points like start_ping and ping!
1326 // TODO: use class AnyP::Uri instead of constructing and re-parsing a string
1327 LOCAL_ARRAY(char, url
, MAX_URL
);
1328 assert(p
->type
== PEER_MULTICAST
);
1329 p
->mcast
.flags
.count_event_pending
= false;
1330 snprintf(url
, MAX_URL
, "http://");
1331 p
->in_addr
.toUrl(url
+7, MAX_URL
-8 );
1333 const auto mx
= MasterXaction::MakePortless
<XactionInitiator::initPeerMcast
>();
1334 auto *req
= HttpRequest::FromUrlXXX(url
, mx
);
1335 assert(req
!= nullptr);
1336 const AccessLogEntry::Pointer ale
= new AccessLogEntry
;
1338 CodeContext::Reset(ale
);
1339 StoreEntry
*fake
= storeCreateEntry(url
, url
, RequestFlags(), Http::METHOD_GET
);
1340 const auto psstate
= new PeerSelector(nullptr);
1341 psstate
->request
= req
;
1342 HTTPMSGLOCK(psstate
->request
);
1343 psstate
->entry
= fake
;
1344 psstate
->peerCountMcastPeerXXX
= cbdataReference(p
);
1345 psstate
->ping
.start
= current_time
;
1347 mem
= fake
->mem_obj
;
1348 mem
->request
= psstate
->request
;
1349 mem
->start_ping
= current_time
;
1350 mem
->ping_reply_callback
= peerCountHandleIcpReply
;
1351 mem
->ircb_data
= psstate
;
1352 mcastSetTtl(icpOutgoingConn
->fd
, p
->mcast
.ttl
);
1353 p
->mcast
.id
= mem
->id
;
1354 reqnum
= icpSetCacheKey((const cache_key
*)fake
->key
);
1355 icpCreateAndSend(ICP_QUERY
, 0, url
, reqnum
, 0,
1356 icpOutgoingConn
->fd
, p
->in_addr
, psstate
->al
);
1357 fake
->ping_status
= PING_WAITING
; // TODO: refactor to use PeerSelector::startPingWaiting()
1358 eventAdd("peerCountMcastPeersDone",
1359 peerCountMcastPeersDone
,
1361 Config
.Timeout
.mcast_icp_query
/ 1000.0, 1);
1362 p
->mcast
.flags
.counting
= true;
1366 peerCountMcastPeersDone(void *data
)
1368 const auto psstate
= static_cast<PeerSelector
*>(data
);
1369 CallBack(psstate
->al
, [psstate
] {
1370 peerCountMcastPeersAbort(psstate
);
1375 /// ends counting of multicast ICP replies
1376 /// to the ICP query initiated by peerCountMcastPeersCreateAndSend()
1378 peerCountMcastPeersAbort(PeerSelector
* const psstate
)
1380 StoreEntry
*fake
= psstate
->entry
;
1382 if (cbdataReferenceValid(psstate
->peerCountMcastPeerXXX
)) {
1383 CachePeer
*p
= (CachePeer
*)psstate
->peerCountMcastPeerXXX
;
1384 p
->mcast
.flags
.counting
= false;
1385 p
->mcast
.avg_n_members
= Math::doubleAverage(p
->mcast
.avg_n_members
, (double) psstate
->ping
.n_recv
, ++p
->mcast
.n_times_counted
, 10);
1386 debugs(15, DBG_IMPORTANT
, "Group " << *p
<< ": " << psstate
->ping
.n_recv
<<
1387 " replies, "<< std::setw(4)<< std::setprecision(2) <<
1388 p
->mcast
.avg_n_members
<<" average, RTT " << p
->stats
.rtt
);
1389 p
->mcast
.n_replies_expected
= (int) p
->mcast
.avg_n_members
;
1392 cbdataReferenceDone(psstate
->peerCountMcastPeerXXX
);
1394 fake
->abort(); // sets ENTRY_ABORTED and initiates related cleanup
1395 fake
->mem_obj
->request
= nullptr;
1396 fake
->unlock("peerCountMcastPeersDone");
1400 peerCountHandleIcpReply(CachePeer
* p
, peer_t
, AnyP::ProtocolType proto
, void *, void *data
)
1402 const auto psstate
= static_cast<PeerSelector
*>(data
);
1403 StoreEntry
*fake
= psstate
->entry
;
1405 MemObject
*mem
= fake
->mem_obj
;
1407 int rtt
= tvSubMsec(mem
->start_ping
, current_time
);
1408 assert(proto
== AnyP::PROTO_ICP
);
1409 ++ psstate
->ping
.n_recv
;
1410 int rtt_av_factor
= RTT_AV_FACTOR
;
1412 if (p
->options
.weighted_roundrobin
)
1413 rtt_av_factor
= RTT_BACKGROUND_AV_FACTOR
;
1415 p
->stats
.rtt
= Math::intAverage(p
->stats
.rtt
, rtt
, psstate
->ping
.n_recv
, rtt_av_factor
);
1419 neighborDumpPeers(StoreEntry
* sentry
)
1421 dump_peers(sentry
, Config
.peers
);
// Appends a CachePeer's configured cache_peer options to the cachemgr
// report as one space-separated token list, terminated by a newline.
// NOTE(review): several original guard lines are missing from this view
// (e.g. the conditions before the weight=/login=/digest-url=/forceddomain=
// outputs); code below is kept byte-identical to what is visible.
1425 dump_peer_options(StoreEntry
* sentry
, CachePeer
* p
)
// one token per enabled boolean option, in squid.conf spelling
1427 if (p
->options
.proxy_only
)
1428 storeAppendPrintf(sentry
, " proxy-only");
1430 if (p
->options
.no_query
)
1431 storeAppendPrintf(sentry
, " no-query");
1433 if (p
->options
.background_ping
)
1434 storeAppendPrintf(sentry
, " background-ping");
1436 if (p
->options
.no_digest
)
1437 storeAppendPrintf(sentry
, " no-digest");
1439 if (p
->options
.default_parent
)
1440 storeAppendPrintf(sentry
, " default");
1442 if (p
->options
.roundrobin
)
1443 storeAppendPrintf(sentry
, " round-robin");
1445 if (p
->options
.carp
)
1446 storeAppendPrintf(sentry
, " carp");
1449 if (p
->options
.userhash
)
1450 storeAppendPrintf(sentry
, " userhash");
1453 if (p
->options
.sourcehash
)
1454 storeAppendPrintf(sentry
, " sourcehash");
1456 if (p
->options
.weighted_roundrobin
)
1457 storeAppendPrintf(sentry
, " weighted-round-robin");
1459 if (p
->options
.mcast_responder
)
1460 storeAppendPrintf(sentry
, " multicast-responder");
1462 #if PEER_MULTICAST_SIBLINGS
1463 if (p
->options
.mcast_siblings
)
1464 storeAppendPrintf(sentry
, " multicast-siblings");
1468 storeAppendPrintf(sentry
, " weight=%d", p
->weight
);
1470 if (p
->options
.closest_only
)
1471 storeAppendPrintf(sentry
, " closest-only");
// HTCP collapses into one " htcp" token; modifiers are joined to it
// with "=" for the first one and "," for the rest (tracked by doneopts)
1474 if (p
->options
.htcp
) {
1475 storeAppendPrintf(sentry
, " htcp");
1476 if (p
->options
.htcp_oldsquid
|| p
->options
.htcp_no_clr
|| p
->options
.htcp_no_purge_clr
|| p
->options
.htcp_only_clr
) {
1477 bool doneopts
= false;
1478 if (p
->options
.htcp_oldsquid
) {
1479 storeAppendPrintf(sentry
, "oldsquid");
1482 if (p
->options
.htcp_no_clr
) {
1483 storeAppendPrintf(sentry
, "%sno-clr",(doneopts
?",":"="));
1486 if (p
->options
.htcp_no_purge_clr
) {
1487 storeAppendPrintf(sentry
, "%sno-purge-clr",(doneopts
?",":"="));
1490 if (p
->options
.htcp_only_clr
) {
1491 storeAppendPrintf(sentry
, "%sonly-clr",(doneopts
?",":"="));
1492 //doneopts = true; // uncomment if more opts are added
1498 if (p
->options
.no_netdb_exchange
)
1499 storeAppendPrintf(sentry
, " no-netdb-exchange");
1502 if (p
->options
.no_delay
)
1503 storeAppendPrintf(sentry
, " no-delay");
// value-carrying options below; their guards are not visible here
1507 storeAppendPrintf(sentry
, " login=%s", p
->login
);
1509 if (p
->mcast
.ttl
> 0)
1510 storeAppendPrintf(sentry
, " ttl=%d", p
->mcast
.ttl
);
1512 if (p
->connect_timeout_raw
> 0)
1513 storeAppendPrintf(sentry
, " connect-timeout=%d", (int)p
->connect_timeout_raw
);
// PEER_TCP_MAGIC_COUNT is the default fail limit, so only non-defaults print
1515 if (p
->connect_fail_limit
!= PEER_TCP_MAGIC_COUNT
)
1516 storeAppendPrintf(sentry
, " connect-fail-limit=%d", p
->connect_fail_limit
);
1518 #if USE_CACHE_DIGESTS
1521 storeAppendPrintf(sentry
, " digest-url=%s", p
->digest_url
);
1525 if (p
->options
.allow_miss
)
1526 storeAppendPrintf(sentry
, " allow-miss");
1528 if (p
->options
.no_tproxy
)
1529 storeAppendPrintf(sentry
, " no-tproxy");
1531 if (p
->max_conn
> 0)
1532 storeAppendPrintf(sentry
, " max-conn=%d", p
->max_conn
);
1533 if (p
->standby
.limit
> 0)
1534 storeAppendPrintf(sentry
, " standby=%d", p
->standby
.limit
);
1536 if (p
->options
.originserver
)
1537 storeAppendPrintf(sentry
, " originserver");
1540 storeAppendPrintf(sentry
, " forceddomain=%s", p
->domain
);
// connection_auth is tri-state: 0=off, 1=on, 2=auto
1542 if (p
->connection_auth
== 0)
1543 storeAppendPrintf(sentry
, " connection-auth=off");
1544 else if (p
->connection_auth
== 1)
1545 storeAppendPrintf(sentry
, " connection-auth=on");
1546 else if (p
->connection_auth
== 2)
1547 storeAppendPrintf(sentry
, " connection-auth=auto");
// TLS options are appended by the peer's security config, "tls-" prefixed
1549 p
->secure
.dumpCfg(sentry
,"tls-");
1550 storeAppendPrintf(sentry
, "\n");
// Writes a human-readable cachemgr report: one multi-line section per
// CachePeer in the given (singly-linked) peer list, covering flags,
// addresses, liveness, and ICP/HTCP ping statistics.
// NOTE(review): several original lines are missing from this view (the
// argument lists of the header printfs at 1565-1570, the loop-index
// declaration, some closing braces); code below is kept byte-identical.
1554 dump_peers(StoreEntry
* sentry
, CachePeer
* peers
)
// scratch buffer for rendering each peer address as text
1556 char ntoabuf
[MAX_IPSTRLEN
];
1559 if (peers
== nullptr)
1560 storeAppendPrintf(sentry
, "There are no neighbors installed.\n");
// walk the linked list of configured neighbors
1562 for (CachePeer
*e
= peers
; e
; e
= e
->next
) {
1563 assert(e
->host
!= nullptr);
1564 storeAppendPrintf(sentry
, "\n%-11.11s: %s\n",
1567 storeAppendPrintf(sentry
, "Host : %s/%d/%d\n",
1571 storeAppendPrintf(sentry
, "Flags :");
1572 dump_peer_options(sentry
, e
);
// list every resolved address for this peer
1574 for (i
= 0; i
< e
->n_addresses
; ++i
) {
1575 storeAppendPrintf(sentry
, "Address[%d] : %s\n", i
,
1576 e
->addresses
[i
].toStr(ntoabuf
,MAX_IPSTRLEN
) );
1579 storeAppendPrintf(sentry
, "Status : %s\n",
1580 neighborUp(e
) ? "Up" : "Down");
1581 storeAppendPrintf(sentry
, "FETCHES : %d\n", e
->stats
.fetches
);
1582 storeAppendPrintf(sentry
, "OPEN CONNS : %d\n", e
->stats
.conn_open
);
1583 storeAppendPrintf(sentry
, "AVG RTT : %d msec\n", e
->stats
.rtt
);
// query/reply timing is only meaningful when we actually ping this peer
1585 if (!e
->options
.no_query
) {
1586 storeAppendPrintf(sentry
, "LAST QUERY : %8d seconds ago\n",
1587 (int) (squid_curtime
- e
->stats
.last_query
));
1589 if (e
->stats
.last_reply
> 0)
1590 storeAppendPrintf(sentry
, "LAST REPLY : %8d seconds ago\n",
1591 (int) (squid_curtime
- e
->stats
.last_reply
));
1593 storeAppendPrintf(sentry
, "LAST REPLY : none received\n");
1595 storeAppendPrintf(sentry
, "PINGS SENT : %8d\n", e
->stats
.pings_sent
);
1597 storeAppendPrintf(sentry
, "PINGS ACKED: %8d %3d%%\n",
1598 e
->stats
.pings_acked
,
1599 Math::intPercent(e
->stats
.pings_acked
, e
->stats
.pings_sent
));
1602 storeAppendPrintf(sentry
, "IGNORED : %8d %3d%%\n", e
->stats
.ignored_replies
, Math::intPercent(e
->stats
.ignored_replies
, e
->stats
.pings_acked
));
// histogram: HTCP peers bucket by miss/hit; ICP peers bucket by opcode
1604 if (!e
->options
.no_query
) {
1605 storeAppendPrintf(sentry
, "Histogram of PINGS ACKED:\n");
1608 if (e
->options
.htcp
) {
1609 storeAppendPrintf(sentry
, "\tMisses\t%8d %3d%%\n",
1611 Math::intPercent(e
->htcp
.counts
[0], e
->stats
.pings_acked
));
1612 storeAppendPrintf(sentry
, "\tHits\t%8d %3d%%\n",
1614 Math::intPercent(e
->htcp
.counts
[1], e
->stats
.pings_acked
));
// skip opcodes never seen from this peer
1618 for (auto op
: WholeEnum
<icp_opcode
>()) {
1619 if (e
->icp
.counts
[op
] == 0)
1622 storeAppendPrintf(sentry
, " %12.12s : %8d %3d%%\n",
1625 Math::intPercent(e
->icp
.counts
[op
], e
->stats
.pings_acked
));
1636 if (e
->stats
.last_connect_failure
) {
1637 storeAppendPrintf(sentry
, "Last failed connect() at: %s\n",
1638 Time::FormatHttpd(e
->stats
.last_connect_failure
))
;
1641 storeAppendPrintf(sentry
, "keep-alive ratio: %d%%\n", Math::intPercent(e
->stats
.n_keepalives_recv
, e
->stats
.n_keepalives_sent
));
// Handles an HTCP reply from address `from` for cache key `key`:
// credits the sending peer as alive, validates the reply against the
// StoreEntry that is still ping-waiting, and finally relays the reply
// to the entry's ping_reply_callback. Replies that cannot be matched or
// are no longer wanted are counted via neighborCountIgnored().
// NOTE(review): this view is missing the declaration of `p`, the guard
// that assigns `mem`, and the early returns after each ignored case;
// code below is kept byte-identical to what is visible.
1647 neighborsHtcpReply(const cache_key
* key
, HtcpReplyData
* htcp
, const Ip::Address
&from
)
1649 StoreEntry
*e
= Store::Root().findCallbackXXX(key
);
1650 MemObject
*mem
= nullptr;
1652 peer_t ntype
= PEER_NONE
;
1653 debugs(15, 6, "neighborsHtcpReply: " <<
1654 (htcp
->hit
? "HIT" : "MISS") << " " <<
1655 storeKeyText(key
) );
// any reply from a known peer proves that peer is alive
1660 if ((p
= whichPeer(from
)))
1661 neighborAliveHtcp(p
, mem
, htcp
);
1663 /* Does the entry exist? */
// NOTE(review): "neighyborsHtcpReply" typo is in a runtime debug string;
// left untouched here (a doc-only change must not alter program output)
1665 debugs(12, 3, "neighyborsHtcpReply: Cache key '" << storeKeyText(key
) << "' not found");
1666 neighborCountIgnored(p
);
1670 /* check if someone is already fetching it */
1671 if (EBIT_TEST(e
->flags
, ENTRY_DISPATCHED
)) {
1672 debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key
) << "' already being fetched.");
1673 neighborCountIgnored(p
);
1677 if (mem
== nullptr) {
1678 debugs(15, 2, "Ignoring reply for missing mem_obj: " << storeKeyText(key
));
1679 neighborCountIgnored(p
);
// late replies (entry no longer waiting for pings) are ignored too
1683 if (e
->ping_status
!= PING_WAITING
) {
1684 debugs(15, 2, "neighborsUdpAck: Entry " << storeKeyText(key
) << " is not PING_WAITING");
1685 neighborCountIgnored(p
);
1690 // TODO: many entries are unlocked; why is this reported at level 1?
1691 debugs(12, DBG_IMPORTANT
, "neighborsUdpAck: '" << storeKeyText(key
) << "' has no locks");
1692 neighborCountIgnored(p
);
1696 if (!mem
->ircb_data
) {
1697 debugs(12, DBG_IMPORTANT
, "ERROR: Squid BUG: missing HTCP callback data for " << *e
);
1698 neighborCountIgnored(p
);
// classify the sender relative to this request before reporting the reply
1703 ntype
= neighborType(p
, mem
->request
->url
);
1704 neighborUpdateRtt(p
, mem
);
// multicast duplicates/echoes are dropped, not forwarded to the callback
1707 if (ignoreMulticastReply(p
, mem
->ircb_data
)) {
1708 neighborCountIgnored(p
);
1712 debugs(15, 3, "neighborsHtcpReply: e = " << e
);
1713 // TODO: Refactor (ping_reply_callback,ircb_data) to add CodeContext.
1714 mem
->ping_reply_callback(p
, ntype
, AnyP::PROTO_HTCP
, htcp
, mem
->ircb_data
);
1718 * Send HTCP CLR messages to all peers configured to receive them.
1721 neighborsHtcpClear(StoreEntry
* e
, HttpRequest
* req
, const HttpRequestMethod
&method
, htcp_clr_reason reason
)
1726 for (p
= Config
.peers
; p
; p
= p
->next
) {
1727 if (!p
->options
.htcp
) {
1730 if (p
->options
.htcp_no_clr
) {
1733 if (p
->options
.htcp_no_purge_clr
&& reason
== HTCP_CLR_PURGE
) {
1736 debugs(15, 3, "neighborsHtcpClear: sending CLR to " << p
->in_addr
.toUrl(buf
, 128));
1737 htcpClear(e
, req
, method
, p
, reason
);