/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 15    Neighbor Routines */

#include "squid.h"
#include "acl/FilledChecklist.h"
#include "anyp/PortCfg.h"
#include "base/EnumIterator.h"
#include "base/IoManip.h"
#include "base/PackableStream.h"
#include "base/PrecomputedCodeContext.h"
#include "CacheDigest.h"
#include "CachePeer.h"
#include "CachePeers.h"
#include "comm/Connection.h"
#include "comm/ConnOpener.h"
#include "debug/Messages.h"
#include "HttpRequest.h"
#include "icmp/net_db.h"
#include "ip/Address.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "multicast.h"
#include "neighbors.h"
#include "NeighborTypeDomainList.h"
#include "PeerDigest.h"
#include "PeerPoolMgr.h"
#include "PeerSelectState.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store_key_md5.h"

/* count mcast group peers every 15 minutes */
#define MCAST_COUNT_RATE 900

bool peerAllowedToUse(const CachePeer *, PeerSelector *);
static int peerWouldBePinged(const CachePeer *, PeerSelector *);
static void neighborAlive(CachePeer *, const MemObject *, const icp_common_t *);
static void neighborAliveHtcp(CachePeer *, const MemObject *, const HtcpReplyData *);
static void neighborCountIgnored(CachePeer *);
static void peerDnsRefreshCheck(void *);
static void peerDnsRefreshStart();
static IPH peerDNSConfigure;
static void peerProbeConnect(CachePeer *, const bool reprobeIfBusy = false);
static CNCB peerProbeConnectDone;
static void peerCountMcastPeersDone(void *data);
static void peerCountMcastPeersStart(void *data);
static void peerCountMcastPeersSchedule(CachePeer * p, time_t when);
static void peerCountMcastPeersAbort(PeerSelector *);
static void peerCountMcastPeersCreateAndSend(CachePeer *p);
static IRCB peerCountHandleIcpReply;

static void neighborIgnoreNonPeer(const Ip::Address &, icp_opcode);
static OBJH neighborDumpPeers;
static void dump_peers(StoreEntry *, CachePeers *);

static unsigned short echo_port;

static int NLateReplies = 0;

const char *
neighborTypeStr(const CachePeer * p)
{
    if (p->type == PEER_NONE)
        return "Non-Peer";

    if (p->type == PEER_SIBLING)
        return "Sibling";

    if (p->type == PEER_MULTICAST)
        return "Multicast Group";

    return "Parent";
}

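/// Finds the configured cache_peer (if any) whose known addresses and ICP port
/// match the source address of a received UDP reply; returns nil when none match.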
CachePeer *
whichPeer(const Ip::Address &from)
{
    int j;

    debugs(15, 3, "whichPeer: from " << from);

    for (const auto &p: CurrentCachePeers()) {
        for (j = 0; j < p->n_addresses; ++j) {
            if (from == p->addresses[j] && from.port() == p->icp.port) {
                return p.get();
            }
        }
    }

    return nullptr;
}

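/// Determines the peer type to use for the given URL, honoring any
/// neighbor_type_domain overrides configured for this cache_peer.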
peer_t
neighborType(const CachePeer * p, const AnyP::Uri &url)
{
    const NeighborTypeDomainList *d = nullptr;

    for (d = p->typelist; d; d = d->next) {
        if (0 == matchDomainName(url.host(), d->domain))
            if (d->type != PEER_NONE)
                return d->type;
    }

    if (p->type == PEER_MULTICAST && p->options.mcast_siblings)
        return PEER_SIBLING;

    return p->type;
}

/**
 * \return Whether it is appropriate to fetch REQUEST from PEER.
 */
bool
peerAllowedToUse(const CachePeer * p, PeerSelector * ps)
{
    assert(ps);
    HttpRequest *request = ps->request;
    assert(request != nullptr);

    if (neighborType(p, request->url) == PEER_SIBLING) {
        if (p->type == PEER_MULTICAST && p->options.mcast_siblings &&
                (request->flags.noCache || request->flags.refresh || request->flags.loopDetected || request->flags.needValidation))
            debugs(15, 2, "multicast-siblings optimization match for " << *p << ", " << request->url.authority());

        if (request->flags.noCache)
            return false;

        if (request->flags.refresh)
            return false;

        if (request->flags.loopDetected)
            return false;

        if (request->flags.needValidation)
            return false;
    }

    // CONNECT requests are proxy requests. Not to be forwarded to origin servers.
    // Unless the destination port matches, in which case we MAY perform a 'DIRECT' to this CachePeer.
    if (p->options.originserver && request->method == Http::METHOD_CONNECT && request->url.port() != p->http_port)
        return false;

    if (p->access == nullptr)
        return true;

    ACLFilledChecklist checklist(p->access, request);
    checklist.updateAle(ps->al);
    checklist.syncAle(request, nullptr);
    return checklist.fastCheck().allowed();
}

/* Return TRUE if it is okay to send an ICP request to this CachePeer. */
static int
peerWouldBePinged(const CachePeer * p, PeerSelector * ps)
{
    HttpRequest *request = ps->request;

    if (p->icp.port == 0)
        return 0;

    if (p->options.no_query)
        return 0;

    if (p->options.mcast_responder)
        return 0;

    if (p->n_addresses == 0)
        return 0;

    if (p->options.background_ping && (squid_curtime - p->stats.last_query < Config.backgroundPingRate))
        return 0;

    /* the case below seems strange, but can happen if the
     * URL host is on the other side of a firewall */
    if (p->type == PEER_SIBLING)
        if (!request->flags.hierarchical)
            return 0;

    if (!peerAllowedToUse(p, ps))
        return 0;

    /* Ping dead peers every timeout interval */
    if (squid_curtime - p->stats.last_query > Config.Timeout.deadPeer)
        return 1;

    return 1;
}

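/// Whether we may open more connections to this peer, honoring any configured
/// max-conn limit (or the global descriptor limit when max-conn is unset).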
bool
peerCanOpenMore(const CachePeer *p)
{
    const int effectiveLimit = p->max_conn <= 0 ? Squid_MaxFD : p->max_conn;
    const int remaining = effectiveLimit - p->stats.conn_open;
    debugs(15, 7, remaining << '=' << effectiveLimit << '-' << p->stats.conn_open);
    return remaining > 0;
}

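/// Whether the peer has standby (or, eventually, idle) connections that can be
/// used without opening any new connections.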
bool
peerHasConnAvailable(const CachePeer *p)
{
    // Standby connections can be used without opening new connections.
    const int standbys = p->standby.pool ? p->standby.pool->count() : 0;

    // XXX: Some idle pconns can be used without opening new connections.
    // Complication: Idle pconns cannot be reused for some requests.
    const int usableIdles = 0;

    const int available = standbys + usableIdles;
    debugs(15, 7, available << '=' << standbys << '+' << usableIdles);
    return available > 0;
}

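/// Accounts for a closed connection to the peer and, if the standby pool was
/// waiting for a closure, resumes standby pool maintenance.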
void
peerConnClosed(CachePeer *p)
{
    --p->stats.conn_open;
    if (p->standby.waitingForClose && peerCanOpenMore(p)) {
        p->standby.waitingForClose = false;
        PeerPoolMgr::Checkpoint(p->standby.mgr, "conn closed");
    }
}

/* Return TRUE if it is okay to send an HTTP request to this CachePeer. */
int
peerHTTPOkay(const CachePeer * p, PeerSelector * ps)
{
    if (!peerCanOpenMore(p) && !peerHasConnAvailable(p))
        return 0;

    if (!peerAllowedToUse(p, ps))
        return 0;

    if (!neighborUp(p))
        return 0;

    return 1;
}

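/// Counts the configured cache_peers that would be queried (pinged) for this request.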
int
neighborsCount(PeerSelector *ps)
{
    int count = 0;

    for (const auto &p: CurrentCachePeers())
        if (peerWouldBePinged(p.get(), ps))
            ++count;

    debugs(15, 3, "neighborsCount: " << count);

    return count;
}

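/// Returns the first alive parent that is allowed and able to serve this request, or nil.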
CachePeer *
getFirstUpParent(PeerSelector *ps)
{
    HttpRequest *request = ps->request;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (!neighborUp(p))
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        debugs(15, 3, "returning " << *p);

        return p;
    }

    debugs(15, 3, "none found");
    return nullptr;
}

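/// Selects the next round-robin parent for this request, comparing candidates
/// by their weight-adjusted use counts.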
CachePeer *
getRoundRobinParent(PeerSelector *ps)
{
    HttpRequest *request = ps->request;

    CachePeer *q = nullptr;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();
        if (!p->options.roundrobin)
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        if (p->weight == 0)
            continue;

        if (q) {
            if (p->weight == q->weight) {
                if (q->rr_count < p->rr_count)
                    continue;
            } else if ( ((double) q->rr_count / q->weight) < ((double) p->rr_count / p->weight)) {
                continue;
            }
        }

        q = p;
    }

    if (q)
        ++ q->rr_count;

    debugs(15, 3, "returning " << RawPointer(q).orNil());

    return q;
}

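/// Selects a weighted-round-robin parent, charging the chosen parent an
/// RTT-based cost so that faster (and heavier-weighted) parents are picked more often.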
CachePeer *
getWeightedRoundRobinParent(PeerSelector *ps)
{
    HttpRequest *request = ps->request;

    CachePeer *q = nullptr;
    int weighted_rtt;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (!p->options.weighted_roundrobin)
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        if (q && q->rr_count < p->rr_count)
            continue;

        q = p;
    }

    if (q && q->rr_count > 1000000)
        /* Clean up accumulated rr_count to prevent overflow */
        for (const auto &p: CurrentCachePeers()) {
            if (!p->options.weighted_roundrobin)
                continue;

            if (neighborType(p.get(), request->url) != PEER_PARENT)
                continue;

            p->rr_count -= 1000000;
        }

    if (q) {
        weighted_rtt = (q->stats.rtt - q->basetime) / q->weight;

        if (weighted_rtt < 1)
            weighted_rtt = 1;

        q->rr_count += weighted_rtt;

        debugs(15, 3, "getWeightedRoundRobinParent: weighted_rtt " << weighted_rtt);
    }

    debugs(15, 3, "returning " << RawPointer(q).orNil());

    return q;
}

/**
 * This gets called every 5 minutes to clear the round-robin counter.
 * The exact timing is an arbitrary default, based on the estimated timing
 * of a large number of requests in a high-performance environment during
 * that period. The larger the number of requests between counter resets,
 * the more balanced the operations.
 *
 * TODO: Make the reset timing a selectable parameter in squid.conf
 */
static void
peerClearRRLoop(void *data)
{
    peerClearRR();
    eventAdd("peerClearRR", peerClearRRLoop, data, 5 * 60.0, 0);
}

/**
 * This gets called on startup and restart to kick off the CachePeer round-robin
 * maintenance event. It ensures that no matter how many times it is called
 * no more than one event is scheduled.
 */
void
peerClearRRStart(void)
{
    static bool event_added = false;

    if (!event_added) {
        peerClearRRLoop(nullptr);
        event_added = true;
    }
}

/**
 * Called whenever the round-robin counters need to be reset to a sane state.
 * So far those times are:
 *  - On startup and reconfigure - to set the counters to sane initial settings.
 *  - When a CachePeer has revived from dead, to prevent the revived CachePeer being
 *    flooded with requests which it has 'missed' during the down period.
 */
void
peerClearRR()
{
    for (const auto &p: CurrentCachePeers())
        p->rr_count = 1;
}

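/// Performs the bookkeeping needed when a peer is detected alive: logs a REVIVED
/// transition, resets round-robin counters, checkpoints the standby pool, and
/// refreshes DNS for peers that still have no known IP addresses.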
void
peerAlive(CachePeer *p)
{
    if (p->stats.logged_state == PEER_DEAD && p->tcp_up) {
        debugs(15, DBG_IMPORTANT, "Detected REVIVED " << neighborTypeStr(p) << ": " << *p);
        p->stats.logged_state = PEER_ALIVE;
        peerClearRR();

        if (p->standby.mgr.valid())
            PeerPoolMgr::Checkpoint(p->standby.mgr, "revived peer");
    }

    p->stats.last_reply = squid_curtime;
    p->stats.probe_start = 0;

    // TODO: Remove or explain how we could detect an alive peer without IP addresses
    if (!p->n_addresses)
        ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
}

CachePeer *
getDefaultParent(PeerSelector *ps)
{
    HttpRequest *request = ps->request;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!p->options.default_parent)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        debugs(15, 3, "returning " << *p);

        return p;
    }

    // TODO: Refactor similar get*() functions to use our return/reporting style
    debugs(15, 3, "none found");
    return nullptr;
}

static void
neighborsRegisterWithCacheManager()
{
    Mgr::RegisterAction("server_list",
                        "Peer Cache Statistics",
                        neighborDumpPeers, 0, 1);
}

void
neighbors_init(void)
{
    struct servent *sep = nullptr;
    const char *me = getMyHostname();

    neighborsRegisterWithCacheManager();

    if (Comm::IsConnOpen(icpIncomingConn)) {
        RawCachePeers peersToRemove;

        for (const auto &thisPeer: CurrentCachePeers()) {
            if (0 != strcmp(thisPeer->host, me))
                continue;

            for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
                if (thisPeer->http_port != s->s.port())
                    continue;

                debugs(15, DBG_IMPORTANT, "WARNING: Peer looks like this host." <<
                       Debug::Extra << "Ignoring cache_peer " << *thisPeer);

                peersToRemove.push_back(thisPeer.get());
                break; // avoid warning about (and removing) the same CachePeer twice
            }
        }

        while (peersToRemove.size()) {
            const auto p = peersToRemove.back();
            peersToRemove.pop_back();
            DeleteConfigured(p);
        }
    }

    peerDnsRefreshStart();

    sep = getservbyname("echo", "udp");
    echo_port = sep ? ntohs((unsigned short) sep->s_port) : 7;
}

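/// Sends ICP (or HTCP) queries to every pingable peer for this request and
/// computes how many replies to expect and how long to wait for them,
/// returning the number of peers actually pinged.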
int
neighborsUdpPing(HttpRequest * request,
                 StoreEntry * entry,
                 IRCB * callback,
                 PeerSelector * ps,
                 int *exprep,
                 int *timeout)
{
    const char *url = entry->url();
    MemObject *mem = entry->mem_obj;
    int reqnum = 0;
    int flags;
    int peers_pinged = 0;
    int parent_timeout = 0, parent_exprep = 0;
    int sibling_timeout = 0, sibling_exprep = 0;
    int mcast_timeout = 0, mcast_exprep = 0;

    if (Config.peers == nullptr)
        return 0;

    assert(!entry->hasDisk());

    mem->start_ping = current_time;

    mem->ping_reply_callback = callback;

    mem->ircb_data = ps;

    reqnum = icpSetCacheKey((const cache_key *)entry->key);

    const auto savedContext = CodeContext::Current();
    for (size_t i = 0; i < Config.peers->size(); ++i) {
        const auto p = &Config.peers->nextPeerToPing(i);

        CodeContext::Reset(p->probeCodeContext);

        debugs(15, 5, "candidate: " << *p);

        if (!peerWouldBePinged(p, ps))
            continue;       /* next CachePeer */

        ++peers_pinged;

        debugs(15, 4, "pinging cache_peer " << *p << " for '" << url << "'");

        debugs(15, 3, "neighborsUdpPing: key = '" << entry->getMD5Text() << "'");

        debugs(15, 3, "neighborsUdpPing: reqnum = " << reqnum);

        if (p->options.htcp && !p->options.htcp_only_clr) {
            if (Config.Port.htcp <= 0) {
                debugs(15, DBG_CRITICAL, "ERROR: HTCP is disabled! Cannot send HTCP request to peer.");
                continue;
            }

            debugs(15, 3, "neighborsUdpPing: sending HTCP query");
            if (htcpQuery(entry, request, p) <= 0)
                continue; // unable to send.
        } else {
            if (Config.Port.icp <= 0 || !Comm::IsConnOpen(icpOutgoingConn)) {
                debugs(15, DBG_CRITICAL, "ERROR: ICP is disabled! Cannot send ICP request to peer.");
                continue;
            }

            if (p->type == PEER_MULTICAST)
                mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);

            if (p->icp.port == echo_port) {
                debugs(15, 4, "neighborsUdpPing: Looks like a dumb cache, send DECHO ping");
                // TODO: Get ALE from callback_data if possible.
                icpCreateAndSend(ICP_DECHO, 0, url, reqnum, 0,
                                 icpOutgoingConn->fd, p->in_addr, nullptr);
            } else {
                flags = 0;

                if (Config.onoff.query_icmp)
                    if (p->icp.version == ICP_VERSION_2)
                        flags |= ICP_FLAG_SRC_RTT;

                // TODO: Get ALE from callback_data if possible.
                icpCreateAndSend(ICP_QUERY, flags, url, reqnum, 0,
                                 icpOutgoingConn->fd, p->in_addr, nullptr);
            }
        }

        ++ p->stats.pings_sent;

        if (p->type == PEER_MULTICAST) {
            mcast_exprep += p->mcast.n_replies_expected;
            mcast_timeout += (p->stats.rtt * p->mcast.n_replies_expected);
        } else if (neighborUp(p)) {
            /* its alive, expect a reply from it */

            if (neighborType(p, request->url) == PEER_PARENT) {
                ++parent_exprep;
                parent_timeout += p->stats.rtt;
            } else {
                ++sibling_exprep;
                sibling_timeout += p->stats.rtt;
            }
        } else {
            /* Neighbor is dead; ping it anyway, but don't expect a reply */
            /* log it once at the threshold */

            if (p->stats.logged_state == PEER_ALIVE) {
                debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << *p);
                p->stats.logged_state = PEER_DEAD;
            }
        }

        p->stats.last_query = squid_curtime;

        /*
         * keep probe_start == 0 for a multicast CachePeer,
         * so neighborUp() never says this CachePeer is dead.
         */
        if ((p->type != PEER_MULTICAST) && (p->stats.probe_start == 0))
            p->stats.probe_start = squid_curtime;
    }
    CodeContext::Reset(savedContext);

    /*
     * How many replies to expect?
     */
    *exprep = parent_exprep + sibling_exprep + mcast_exprep;

    /*
     * If there is a configured timeout, use it
     */
    if (Config.Timeout.icp_query)
        *timeout = Config.Timeout.icp_query;
    else {
        if (*exprep > 0) {
            if (parent_exprep)
                *timeout = 2 * parent_timeout / parent_exprep;
            else if (mcast_exprep)
                *timeout = 2 * mcast_timeout / mcast_exprep;
            else
                *timeout = 2 * sibling_timeout / sibling_exprep;
        } else
            *timeout = 2000;    /* 2 seconds */

        if (Config.Timeout.icp_query_max)
            if (*timeout > Config.Timeout.icp_query_max)
                *timeout = Config.Timeout.icp_query_max;

        if (*timeout < Config.Timeout.icp_query_min)
            *timeout = Config.Timeout.icp_query_min;
    }

    return peers_pinged;
}

/* lookup the digest of a given CachePeer */
lookup_t
peerDigestLookup(CachePeer * p, PeerSelector * ps)
{
#if USE_CACHE_DIGESTS
    HttpRequest *request = ps->request;

    debugs(15, 5, "cache_peer " << *p);
    /* does the peer have a valid digest? */

    if (!p->digest) {
        debugs(15, 5, "peerDigestLookup: gone!");
        return LOOKUP_NONE;
    } else if (!peerHTTPOkay(p, ps)) {
        debugs(15, 5, "peerDigestLookup: !peerHTTPOkay");
        return LOOKUP_NONE;
    } else if (!p->digest->flags.needed) {
        debugs(15, 5, "peerDigestLookup: note need");
        peerDigestNeeded(p->digest);
        return LOOKUP_NONE;
    } else if (!p->digest->flags.usable) {
        debugs(15, 5, "peerDigestLookup: !ready && " << (p->digest->flags.requested ? "" : "!") << "requested");
        return LOOKUP_NONE;
    }

    debugs(15, 5, "OK to lookup cache_peer " << *p);
    assert(p->digest->cd);
    /* does digest predict a hit? */

    if (!p->digest->cd->contains(storeKeyPublicByRequest(request)))
        return LOOKUP_MISS;

    debugs(15, 5, "HIT for cache_peer " << *p);

    return LOOKUP_HIT;
#else
    (void)p;
    (void)ps;
    return LOOKUP_NONE;
#endif
}

/* select best CachePeer based on cache digests */
CachePeer *
neighborsDigestSelect(PeerSelector *ps)
{
    CachePeer *best_p = nullptr;
#if USE_CACHE_DIGESTS
    HttpRequest *request = ps->request;

    int best_rtt = 0;
    int choice_count = 0;
    int ichoice_count = 0;
    int p_rtt;

    if (!request->flags.hierarchical)
        return nullptr;

    storeKeyPublicByRequest(request);

    for (size_t i = 0; i < Config.peers->size(); ++i) {
        const auto p = &Config.peers->nextPeerToPing(i);

        const auto lookup = peerDigestLookup(p, ps);

        if (lookup == LOOKUP_NONE)
            continue;

        ++choice_count;

        if (lookup == LOOKUP_MISS)
            continue;

        p_rtt = netdbHostRtt(p->host);

        debugs(15, 5, "cache_peer " << *p << " rtt: " << p_rtt);

        /* is this CachePeer better than others in terms of rtt ? */
        if (!best_p || (p_rtt && p_rtt < best_rtt)) {
            best_p = p;
            best_rtt = p_rtt;

            if (p_rtt)      /* informative choice (aka educated guess) */
                ++ichoice_count;

            debugs(15, 4, "cache_peer " << *p << " leads with rtt " << best_rtt);
        }
    }

    debugs(15, 4, "neighborsDigestSelect: choices: " << choice_count << " (" << ichoice_count << ")");
    peerNoteDigestLookup(request, best_p,
                         best_p ? LOOKUP_HIT : (choice_count ? LOOKUP_MISS : LOOKUP_NONE));
    request->hier.n_choices = choice_count;
    request->hier.n_ichoices = ichoice_count;
#else
    (void)ps;
#endif

    return best_p;
}

void
peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup)
{
#if USE_CACHE_DIGESTS
    if (p)
        strncpy(request->hier.cd_host, p->host, sizeof(request->hier.cd_host)-1);
    else
        *request->hier.cd_host = '\0';

    request->hier.cd_lookup = lookup;
    debugs(15, 4, "cache_peer " << RawPointer(p).orNil() << ", lookup: " << lookup_t_str[lookup]);
#else
    (void)request;
    (void)p;
    (void)lookup;
#endif
}

static void
neighborAlive(CachePeer * p, const MemObject *, const icp_common_t * header)
{
    peerAlive(p);
    ++ p->stats.pings_acked;

    if ((icp_opcode) header->opcode <= ICP_END)
        ++ p->icp.counts[header->opcode];

    p->icp.version = (int) header->version;
}

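/// Updates the peer's smoothed RTT estimate using the time since this
/// transaction started pinging; out-of-range samples are discarded.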
static void
neighborUpdateRtt(CachePeer * p, MemObject * mem)
{
    int rtt, rtt_av_factor;

    if (!mem)
        return;

    if (!mem->start_ping.tv_sec)
        return;

    rtt = tvSubMsec(mem->start_ping, current_time);

    if (rtt < 1 || rtt > 10000)
        return;

    rtt_av_factor = RTT_AV_FACTOR;

    if (p->options.weighted_roundrobin)
        rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;

    p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, p->stats.pings_acked, rtt_av_factor);
}

static void
neighborAliveHtcp(CachePeer * p, const MemObject *, const HtcpReplyData * htcp)
{
    peerAlive(p);
    ++ p->stats.pings_acked;
    ++ p->htcp.counts[htcp->hit ? 1 : 0];
    p->htcp.version = htcp->version;
}

static void
neighborCountIgnored(CachePeer * p)
{
    if (p == nullptr)
        return;

    ++ p->stats.ignored_replies;

    ++NLateReplies;
}

static void
neighborIgnoreNonPeer(const Ip::Address &from, icp_opcode opcode)
{
    static uint64_t ignoredReplies = 0;
    if (isPowTen(++ignoredReplies)) {
        debugs(15, DBG_IMPORTANT, "WARNING: Ignored " << ignoredReplies << " ICP replies from non-peers" <<
               Debug::Extra << "last seen non-peer source address: " << from <<
               Debug::Extra << "last seen ICP reply opcode: " << icp_opcode_str[opcode]);
    }
}

/* ignoreMulticastReply
 *
 * * We want to ignore replies from multicast peers if the
 * * cache_host_domain rules would normally prevent the CachePeer
 * * from being used
 */
static int
ignoreMulticastReply(CachePeer * p, PeerSelector * ps)
{
    if (p == nullptr)
        return 0;

    if (!p->options.mcast_responder)
        return 0;

    if (peerHTTPOkay(p, ps))
        return 0;

    return 1;
}

/**
 * I should attach these records to the entry.  We take the first
 * hit we get or wait until everyone misses.  The timeout handler
 * call needs to nip this shopping list or call one of the misses.
 *
 * If a hit process is already started, then so be it.
 */
void
neighborsUdpAck(const cache_key * key, icp_common_t * header, const Ip::Address &from)
{
    CachePeer *p = nullptr;
    StoreEntry *entry = nullptr;
    MemObject *mem = nullptr;
    peer_t ntype = PEER_NONE;
    icp_opcode opcode = (icp_opcode) header->opcode;

    debugs(15, 6, "neighborsUdpAck: opcode " << opcode << " '" << storeKeyText(key) << "'");

    if ((entry = Store::Root().findCallbackXXX(key)))
        mem = entry->mem_obj;

    if ((p = whichPeer(from)))
        neighborAlive(p, mem, header);

    if (opcode > ICP_END)
        return;

    const char *opcode_d = icp_opcode_str[opcode];

    if (p)
        neighborUpdateRtt(p, mem);

    /* Does the entry exist? */
    if (nullptr == entry) {
        debugs(12, 3, "neighborsUdpAck: Cache key '" << storeKeyText(key) << "' not found");
        neighborCountIgnored(p);
        return;
    }

    /* check if someone is already fetching it */
    if (EBIT_TEST(entry->flags, ENTRY_DISPATCHED)) {
        debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched.");
        neighborCountIgnored(p);
        return;
    }

    if (mem == nullptr) {
        debugs(15, 2, "Ignoring " << opcode_d << " for missing mem_obj: " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (entry->ping_status != PING_WAITING) {
        debugs(15, 2, "neighborsUdpAck: Late " << opcode_d << " for " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (!entry->locked()) {
        // TODO: many entries are unlocked; why is this reported at level 1?
        debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks");
        neighborCountIgnored(p);
        return;
    }

    if (!mem->ircb_data) {
        debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing ICP callback data for " << *entry);
        neighborCountIgnored(p);
        return;
    }

    debugs(15, 3, opcode_d << " for " << storeKeyText(key) << " from " << RawPointer(p).orNil("source"));

    if (p)
        ntype = neighborType(p, mem->request->url);

    if (ignoreMulticastReply(p, mem->ircb_data)) {
        neighborCountIgnored(p);
    } else if (opcode == ICP_MISS) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_HIT) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            header->opcode = ICP_HIT;
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_DECHO) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (ntype == PEER_SIBLING) {
            debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n");
            debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n");
        } else {
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_SECHO) {
        if (p) {
            debugs(15, DBG_IMPORTANT, "Ignoring SECHO from neighbor " << *p);
            neighborCountIgnored(p);
        } else {
            debugs(15, DBG_IMPORTANT, "Unsolicited SECHO from " << from);
        }
    } else if (opcode == ICP_DENIED) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (p->stats.pings_acked > 100) {
            if (100 * p->icp.counts[ICP_DENIED] / p->stats.pings_acked > 95) {
                debugs(15, DBG_CRITICAL, "Disabling cache_peer " << *p <<
                       " because over 95% of its replies are UDP_DENIED");
                DeleteConfigured(p);
                p = nullptr;
            } else {
                neighborCountIgnored(p);
            }
        }
    } else if (opcode == ICP_MISS_NOFETCH) {
        mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
    } else {
        debugs(15, DBG_CRITICAL, "ERROR: neighborsUdpAck: Unexpected ICP reply: " << opcode_d);
    }
}

CachePeer *
findCachePeerByName(const char * const name)
{
    for (const auto &p: CurrentCachePeers()) {
        if (!strcasecmp(name, p->name))
            return p.get();
    }

    return nullptr;
}

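/// Whether the peer is currently considered alive: it must have known IP
/// addresses and a working TCP connection, and (unless marked no-query) must
/// not have exceeded the dead_peer_timeout since probing started.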
int
neighborUp(const CachePeer * p)
{
    if (!p->tcp_up) {
        CallService(p->probeCodeContext, [&] {
            peerProbeConnect(const_cast<CachePeer*>(p));
        });
        return 0;
    }

    /*
     * The CachePeer can not be UP if we don't have any IP addresses
     * for it.
     */
    if (0 == p->n_addresses) {
        debugs(15, 8, "DOWN (no-ip): " << *p);
        return 0;
    }

    if (p->options.no_query) {
        debugs(15, 8, "UP (no-query): " << *p);
        return 1;
    }

    if (p->stats.probe_start != 0 &&
            squid_curtime - p->stats.probe_start > Config.Timeout.deadPeer) {
        debugs(15, 8, "DOWN (dead): " << *p);
        return 0;
    }

    debugs(15, 8, "UP: " << *p);
    return 1;
}

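/// Clamps the given timeout to at least one second.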
time_t
positiveTimeout(const time_t timeout)
{
    return max(static_cast<time_t>(1), timeout);
}

static void
peerDNSConfigure(const ipcache_addrs *ia, const Dns::LookupDetails &, void *data)
{
    // TODO: connections to no-longer valid IP addresses should be
    // closed when we can detect such IP addresses.

    CachePeer *p = (CachePeer *)data;

    if (p->n_addresses == 0) {
        debugs(15, Important(29), "Configuring " << neighborTypeStr(p) << " " << *p);

        if (p->type == PEER_MULTICAST)
            debugs(15, DBG_IMPORTANT, "    Multicast TTL = " << p->mcast.ttl);
    }

    p->n_addresses = 0;

    if (ia == nullptr) {
        debugs(0, DBG_CRITICAL, "WARNING: DNS lookup for '" << *p << "' failed!");
        return;
    }

    if (ia->empty()) {
        debugs(0, DBG_CRITICAL, "WARNING: No IP address found for '" << *p << "'!");
        return;
    }

    for (const auto &ip: ia->goodAndBad()) { // TODO: Consider using just good().
        if (p->n_addresses < PEER_MAX_ADDRESSES) {
            const auto idx = p->n_addresses++;
            p->addresses[idx] = ip;
            debugs(15, 2, "--> IP address #" << idx << ": " << p->addresses[idx]);
        } else {
            debugs(15, 3, "ignoring remaining " << (ia->size() - p->n_addresses) << " ips");
            break;
        }
    }

    p->in_addr.setEmpty();
    p->in_addr = p->addresses[0];
    p->in_addr.port(p->icp.port);

    peerProbeConnect(p, true); // detect any died or revived peers ASAP

    if (p->type == PEER_MULTICAST)
        peerCountMcastPeersSchedule(p, 10);

    if (p->type != PEER_MULTICAST && IamWorkerProcess())
        if (!p->options.no_netdb_exchange)
            eventAddIsh("netdbExchangeStart", netdbExchangeStart, p, 30.0, 1);

    if (p->standby.mgr.valid())
        PeerPoolMgr::Checkpoint(p->standby.mgr, "resolved peer");
}

static void
peerScheduleDnsRefreshCheck(const double delayInSeconds)
{
    if (eventFind(peerDnsRefreshCheck, nullptr))
        eventDelete(peerDnsRefreshCheck, nullptr);
    eventAddIsh("peerDnsRefreshCheck", peerDnsRefreshCheck, nullptr, delayInSeconds, 1);
}

static void
peerDnsRefreshCheck(void *)
{
    if (!statSawRecentRequests()) {
        /* no recent client traffic, wait a bit */
        peerScheduleDnsRefreshCheck(180.0);
        return;
    }

    peerDnsRefreshStart();
}

static void
peerDnsRefreshStart()
{
    const auto savedContext = CodeContext::Current();
    for (const auto &p: CurrentCachePeers()) {
        CodeContext::Reset(p->probeCodeContext);
        ipcache_nbgethostbyname(p->host, peerDNSConfigure, p.get());
    }
    CodeContext::Reset(savedContext);

    peerScheduleDnsRefreshCheck(3600.0);
}

/// whether new TCP probes are currently banned
static bool
peerProbeIsBusy(const CachePeer *p)
{
    if (p->testing_now > 0) {
        debugs(15, 8, "yes, probing " << p);
        return true;
    }

    if (squid_curtime - p->stats.last_connect_probe == 0) {
        debugs(15, 8, "yes, just probed " << p);
        return true;
    }

    return false;
}

/*
 * peerProbeConnect will be called on dead peers by neighborUp
 */
static void
peerProbeConnect(CachePeer *p, const bool reprobeIfBusy)
{
    if (peerProbeIsBusy(p)) {
        p->reprobe = reprobeIfBusy;
        return;
    }

    const auto ctimeout = p->connectTimeout();
    /* for each IP address of this CachePeer, find one that we can connect to and probe it. */
    for (int i = 0; i < p->n_addresses; ++i) {
        Comm::ConnectionPointer conn = new Comm::Connection;
        conn->remote = p->addresses[i];
        conn->remote.port(p->http_port);
        getOutgoingAddress(nullptr, conn);

        ++ p->testing_now;

        AsyncCall::Pointer call = commCbCall(15,3, "peerProbeConnectDone", CommConnectCbPtrFun(peerProbeConnectDone, p));
        Comm::ConnOpener *cs = new Comm::ConnOpener(conn, call, ctimeout);
        cs->setHost(p->host);
        AsyncJob::Start(cs);
    }

    p->stats.last_connect_probe = squid_curtime;
}

static void
peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int, void *data)
{
    CachePeer *p = (CachePeer*)data;

    if (status == Comm::OK)
        p->noteSuccess();
    else
        p->noteFailure();

    -- p->testing_now;
    conn->close();
    // TODO: log this traffic.

    if (p->reprobe)
        peerProbeConnect(p);
}

static void
peerCountMcastPeersSchedule(CachePeer * p, time_t when)
{
    if (p->mcast.flags.count_event_pending)
        return;

    eventAdd("peerCountMcastPeersStart",
             peerCountMcastPeersStart,
             p,
             (double) when, 1);

    p->mcast.flags.count_event_pending = true;
}

static void
peerCountMcastPeersStart(void *data)
{
    const auto peer = static_cast<CachePeer*>(data);
    CallContextCreator([peer] {
        peerCountMcastPeersCreateAndSend(peer);
    });
    peerCountMcastPeersSchedule(peer, MCAST_COUNT_RATE);
}

/// initiates an ICP transaction to a multicast peer
static void
peerCountMcastPeersCreateAndSend(CachePeer * const p)
{
    // XXX: Do not create lots of complex fake objects (while abusing their
    // APIs) to pass around a few basic data points like start_ping and ping!
    MemObject *mem;
    int reqnum;
    // TODO: use class AnyP::Uri instead of constructing and re-parsing a string
    LOCAL_ARRAY(char, url, MAX_URL);
    assert(p->type == PEER_MULTICAST);
    p->mcast.flags.count_event_pending = false;
    snprintf(url, MAX_URL, "http://");
    p->in_addr.toUrl(url+7, MAX_URL -8 );
    strcat(url, "/");
    const auto mx = MasterXaction::MakePortless<XactionInitiator::initPeerMcast>();
    auto *req = HttpRequest::FromUrlXXX(url, mx);
    assert(req != nullptr);
    const AccessLogEntry::Pointer ale = new AccessLogEntry;
    ale->request = req;
    CodeContext::Reset(ale);
    StoreEntry *fake = storeCreateEntry(url, url, RequestFlags(), Http::METHOD_GET);
    const auto psstate = new PeerSelector(nullptr);
    psstate->request = req;
    HTTPMSGLOCK(psstate->request);
    psstate->entry = fake;
    psstate->peerCountMcastPeerXXX = cbdataReference(p);
    psstate->ping.start = current_time;
    psstate->al = ale;
    mem = fake->mem_obj;
    mem->request = psstate->request;
    mem->start_ping = current_time;
    mem->ping_reply_callback = peerCountHandleIcpReply;
    mem->ircb_data = psstate;
    mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);
    p->mcast.id = mem->id;
    reqnum = icpSetCacheKey((const cache_key *)fake->key);
    icpCreateAndSend(ICP_QUERY, 0, url, reqnum, 0,
                     icpOutgoingConn->fd, p->in_addr, psstate->al);
    fake->ping_status = PING_WAITING; // TODO: refactor to use PeerSelector::startPingWaiting()
    eventAdd("peerCountMcastPeersDone",
             peerCountMcastPeersDone,
             psstate,
             Config.Timeout.mcast_icp_query / 1000.0, 1);
    p->mcast.flags.counting = true;
}

static void
peerCountMcastPeersDone(void *data)
{
    const auto psstate = static_cast<PeerSelector*>(data);
    CallBack(psstate->al, [psstate] {
        peerCountMcastPeersAbort(psstate);
        delete psstate;
    });
}

/// ends counting of multicast ICP replies
/// to the ICP query initiated by peerCountMcastPeersCreateAndSend()
static void
peerCountMcastPeersAbort(PeerSelector * const psstate)
{
    StoreEntry *fake = psstate->entry;

    if (cbdataReferenceValid(psstate->peerCountMcastPeerXXX)) {
        CachePeer *p = (CachePeer *)psstate->peerCountMcastPeerXXX;
        p->mcast.flags.counting = false;
        p->mcast.avg_n_members = Math::doubleAverage(p->mcast.avg_n_members, (double) psstate->ping.n_recv, ++p->mcast.n_times_counted, 10);
        debugs(15, DBG_IMPORTANT, "Group " << *p << ": " << psstate->ping.n_recv <<
               " replies, " << std::setw(4) << std::setprecision(2) <<
               p->mcast.avg_n_members << " average, RTT " << p->stats.rtt);
        p->mcast.n_replies_expected = (int) p->mcast.avg_n_members;
    }

    cbdataReferenceDone(psstate->peerCountMcastPeerXXX);

    fake->abort(); // sets ENTRY_ABORTED and initiates related cleanup
    fake->mem_obj->request = nullptr;
    fake->unlock("peerCountMcastPeersDone");
}

static void
peerCountHandleIcpReply(CachePeer * p, peer_t, AnyP::ProtocolType proto, void *, void *data)
{
    const auto psstate = static_cast<PeerSelector*>(data);
    StoreEntry *fake = psstate->entry;
    MemObject *mem = fake->mem_obj;
    int rtt = tvSubMsec(mem->start_ping, current_time);
    assert(proto == AnyP::PROTO_ICP);
    ++ psstate->ping.n_recv;
    int rtt_av_factor = RTT_AV_FACTOR;

    if (p->options.weighted_roundrobin)
        rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;

    p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, psstate->ping.n_recv, rtt_av_factor);
}

static void
neighborDumpPeers(StoreEntry * sentry)
{
    dump_peers(sentry, Config.peers);
}

static void
dump_peer_options(StoreEntry * sentry, CachePeer * p)
{
    PackableStream os(*sentry);

    if (p->options.proxy_only)
        os << " proxy-only";

    if (p->options.no_query)
        os << " no-query";

    if (p->options.background_ping)
        os << " background-ping";

    if (p->options.no_digest)
        os << " no-digest";

    if (p->options.default_parent)
        os << " default";

    if (p->options.roundrobin)
        os << " round-robin";

    if (p->options.carp)
        os << " carp";

    if (p->options.userhash)
        os << " userhash";

    if (p->options.sourcehash)
        os << " sourcehash";

    if (p->options.weighted_roundrobin)
        os << " weighted-round-robin";

    if (p->options.mcast_responder)
        os << " multicast-responder";

    if (p->options.mcast_siblings)
        os << " multicast-siblings";

    if (p->weight != 1)
        os << " weight=" << p->weight;

    if (p->options.closest_only)
        os << " closest-only";

    if (p->options.htcp) {
        os << " htcp";
        std::vector<const char *, PoolingAllocator<const char *> > opts;
        if (p->options.htcp_oldsquid)
            opts.push_back("oldsquid");
        if (p->options.htcp_no_clr)
            opts.push_back("no-clr");
        if (p->options.htcp_no_purge_clr)
            opts.push_back("no-purge-clr");
        if (p->options.htcp_only_clr)
            opts.push_back("only-clr");
        if (p->options.htcp_forward_clr)
            opts.push_back("forward-clr");
        os << AsList(opts).prefixedBy("=").delimitedBy(",");
    }

    if (p->options.no_netdb_exchange)
        os << " no-netdb-exchange";

    if (p->options.no_delay)
        os << " no-delay";

    if (p->login)
        os << " login=" << p->login;

    if (p->mcast.ttl > 0)
        os << " ttl=" << p->mcast.ttl;

    if (p->connect_timeout_raw > 0)
        os << " connect-timeout=" << p->connect_timeout_raw;

    if (p->connect_fail_limit != PEER_TCP_MAGIC_COUNT)
        os << " connect-fail-limit=" << p->connect_fail_limit;

#if USE_CACHE_DIGESTS

    if (p->digest_url)
        os << " digest-url=" << p->digest_url;

#endif

    if (p->options.allow_miss)
        os << " allow-miss";

    if (p->options.no_tproxy)
        os << " no-tproxy";

    if (p->max_conn > 0)
        os << " max-conn=" << p->max_conn;

    if (p->standby.limit > 0)
        os << " standby=" << p->standby.limit;

    if (p->options.originserver)
        os << " originserver";

    if (p->domain)
        os << " forceddomain=" << p->domain;

    if (p->connection_auth == 0)
        os << " connection-auth=off";
    else if (p->connection_auth == 1)
        os << " connection-auth=on";
    else if (p->connection_auth == 2)
        os << " connection-auth=auto";

    p->secure.dumpCfg(os, "tls-");

    os << '\n';
}

static void
dump_peers(StoreEntry *sentry, CachePeers *peers)
{
    char ntoabuf[MAX_IPSTRLEN];
    int i;

    if (peers == nullptr) {
        storeAppendPrintf(sentry, "There are no neighbors installed.\n");
        return;
    }

    for (const auto &peer: *peers) {
        const auto e = peer.get();
        assert(e->host != nullptr);
        storeAppendPrintf(sentry, "\n%-11.11s: %s\n",
                          neighborTypeStr(e),
                          e->name);
        storeAppendPrintf(sentry, "Host       : %s/%d/%d\n",
                          e->host,
                          e->http_port,
                          e->icp.port);
        storeAppendPrintf(sentry, "Flags      :");
        dump_peer_options(sentry, e);

        for (i = 0; i < e->n_addresses; ++i) {
            storeAppendPrintf(sentry, "Address[%d] : %s\n", i,
                              e->addresses[i].toStr(ntoabuf,MAX_IPSTRLEN) );
        }

        storeAppendPrintf(sentry, "Status     : %s\n",
                          neighborUp(e) ? "Up" : "Down");
        storeAppendPrintf(sentry, "FETCHES    : %d\n", e->stats.fetches);
        storeAppendPrintf(sentry, "OPEN CONNS : %d\n", e->stats.conn_open);
        storeAppendPrintf(sentry, "AVG RTT    : %d msec\n", e->stats.rtt);

        if (!e->options.no_query) {
            storeAppendPrintf(sentry, "LAST QUERY : %8d seconds ago\n",
                              (int) (squid_curtime - e->stats.last_query));

            if (e->stats.last_reply > 0)
                storeAppendPrintf(sentry, "LAST REPLY : %8d seconds ago\n",
                                  (int) (squid_curtime - e->stats.last_reply));
            else
                storeAppendPrintf(sentry, "LAST REPLY : none received\n");

            storeAppendPrintf(sentry, "PINGS SENT : %8d\n", e->stats.pings_sent);

            storeAppendPrintf(sentry, "PINGS ACKED: %8d %3d%%\n",
                              e->stats.pings_acked,
                              Math::intPercent(e->stats.pings_acked, e->stats.pings_sent));
        }

        storeAppendPrintf(sentry, "IGNORED    : %8d %3d%%\n", e->stats.ignored_replies, Math::intPercent(e->stats.ignored_replies, e->stats.pings_acked));

        if (!e->options.no_query) {
            storeAppendPrintf(sentry, "Histogram of PINGS ACKED:\n");

            if (e->options.htcp) {
                storeAppendPrintf(sentry, "\tMisses\t%8d %3d%%\n",
                                  e->htcp.counts[0],
                                  Math::intPercent(e->htcp.counts[0], e->stats.pings_acked));
                storeAppendPrintf(sentry, "\tHits\t%8d %3d%%\n",
                                  e->htcp.counts[1],
                                  Math::intPercent(e->htcp.counts[1], e->stats.pings_acked));
            } else {
                for (auto op : WholeEnum<icp_opcode>()) {
                    if (e->icp.counts[op] == 0)
                        continue;

                    storeAppendPrintf(sentry, "    %12.12s : %8d %3d%%\n",
                                      icp_opcode_str[op],
                                      e->icp.counts[op],
                                      Math::intPercent(e->icp.counts[op], e->stats.pings_acked));
                }
            }
        }

        if (e->stats.last_connect_failure) {
            storeAppendPrintf(sentry, "Last failed connect() at: %s\n",
                              Time::FormatHttpd(e->stats.last_connect_failure));
        }

        storeAppendPrintf(sentry, "keep-alive ratio: %d%%\n", Math::intPercent(e->stats.n_keepalives_recv, e->stats.n_keepalives_sent));
    }
}

void
neighborsHtcpReply(const cache_key * key, HtcpReplyData * htcp, const Ip::Address &from)
{
    StoreEntry *e = Store::Root().findCallbackXXX(key);
    MemObject *mem = nullptr;
    CachePeer *p = nullptr;
    peer_t ntype = PEER_NONE;
    debugs(15, 6, "neighborsHtcpReply: " <<
           (htcp->hit ? "HIT" : "MISS") << " " <<
           storeKeyText(key) );

    if (e)
        mem = e->mem_obj;

    if ((p = whichPeer(from)))
        neighborAliveHtcp(p, mem, htcp);

    /* Does the entry exist? */
    if (nullptr == e) {
        debugs(12, 3, "neighborsHtcpReply: Cache key '" << storeKeyText(key) << "' not found");
        neighborCountIgnored(p);
        return;
    }

    /* check if someone is already fetching it */
    if (EBIT_TEST(e->flags, ENTRY_DISPATCHED)) {
        debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched.");
        neighborCountIgnored(p);
        return;
    }

    if (mem == nullptr) {
        debugs(15, 2, "Ignoring reply for missing mem_obj: " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (e->ping_status != PING_WAITING) {
        debugs(15, 2, "neighborsUdpAck: Entry " << storeKeyText(key) << " is not PING_WAITING");
        neighborCountIgnored(p);
        return;
    }

    if (!e->locked()) {
        // TODO: many entries are unlocked; why is this reported at level 1?
        debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks");
        neighborCountIgnored(p);
        return;
    }

    if (!mem->ircb_data) {
        debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing HTCP callback data for " << *e);
        neighborCountIgnored(p);
        return;
    }

    if (p) {
        ntype = neighborType(p, mem->request->url);
        neighborUpdateRtt(p, mem);
    }

    if (ignoreMulticastReply(p, mem->ircb_data)) {
        neighborCountIgnored(p);
        return;
    }

    debugs(15, 3, "neighborsHtcpReply: e = " << e);
    // TODO: Refactor (ping_reply_callback,ircb_data) to add CodeContext.
    mem->ping_reply_callback(p, ntype, AnyP::PROTO_HTCP, htcp, mem->ircb_data);
}

/**
 * Send HTCP CLR messages to all peers configured to receive them.
 */
void
neighborsHtcpClear(StoreEntry * e, HttpRequest * req, const HttpRequestMethod &method, htcp_clr_reason reason)
{
    char buf[128];

    for (const auto &p: CurrentCachePeers()) {
        if (!p->options.htcp) {
            continue;
        }
        if (p->options.htcp_no_clr) {
            continue;
        }
        if (p->options.htcp_no_purge_clr && reason == HTCP_CLR_PURGE) {
            continue;
        }
        debugs(15, 3, "neighborsHtcpClear: sending CLR to " << p->in_addr.toUrl(buf, 128));
        htcpClear(e, req, method, p.get(), reason);
    }
}
