src/neighbors.cc
1 /*
2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 15 Neighbor Routines */
10
11 #include "squid.h"
12 #include "acl/FilledChecklist.h"
13 #include "anyp/PortCfg.h"
14 #include "base/EnumIterator.h"
15 #include "base/IoManip.h"
16 #include "CacheDigest.h"
17 #include "CachePeer.h"
18 #include "comm/Connection.h"
19 #include "comm/ConnOpener.h"
20 #include "debug/Messages.h"
21 #include "event.h"
22 #include "FwdState.h"
23 #include "globals.h"
24 #include "htcp.h"
25 #include "HttpRequest.h"
26 #include "icmp/net_db.h"
27 #include "ICP.h"
28 #include "int.h"
29 #include "ip/Address.h"
30 #include "ip/tools.h"
31 #include "ipcache.h"
32 #include "MemObject.h"
33 #include "mgr/Registration.h"
34 #include "multicast.h"
35 #include "neighbors.h"
36 #include "NeighborTypeDomainList.h"
37 #include "pconn.h"
38 #include "PeerDigest.h"
39 #include "PeerPoolMgr.h"
40 #include "PeerSelectState.h"
41 #include "RequestFlags.h"
42 #include "SquidConfig.h"
43 #include "SquidMath.h"
44 #include "stat.h"
45 #include "Store.h"
46 #include "store_key_md5.h"
47 #include "tools.h"
48
49 /* count mcast group peers every 15 minutes */
50 #define MCAST_COUNT_RATE 900
51
52 bool peerAllowedToUse(const CachePeer *, PeerSelector *);
53 static int peerWouldBePinged(const CachePeer *, PeerSelector *);
54 static void neighborRemove(CachePeer *);
55 static void neighborAlive(CachePeer *, const MemObject *, const icp_common_t *);
56 #if USE_HTCP
57 static void neighborAliveHtcp(CachePeer *, const MemObject *, const HtcpReplyData *);
58 #endif
59 static void neighborCountIgnored(CachePeer *);
60 static void peerRefreshDNS(void *);
61 static IPH peerDNSConfigure;
62 static void peerProbeConnect(CachePeer *, const bool reprobeIfBusy = false);
63 static CNCB peerProbeConnectDone;
64 static void peerCountMcastPeersDone(void *data);
65 static void peerCountMcastPeersStart(void *data);
66 static void peerCountMcastPeersSchedule(CachePeer * p, time_t when);
67 static void peerCountMcastPeersAbort(PeerSelector *);
68 static void peerCountMcastPeersCreateAndSend(CachePeer *p);
69 static IRCB peerCountHandleIcpReply;
70
71 static void neighborIgnoreNonPeer(const Ip::Address &, icp_opcode);
72 static OBJH neighborDumpPeers;
73 static OBJH neighborDumpNonPeers;
74 static void dump_peers(StoreEntry * sentry, CachePeer * peers);
75
76 static unsigned short echo_port;
77
78 static int NLateReplies = 0;
79 static CachePeer *first_ping = nullptr;
80
81 const char *
82 neighborTypeStr(const CachePeer * p)
83 {
84 if (p->type == PEER_NONE)
85 return "Non-Peer";
86
87 if (p->type == PEER_SIBLING)
88 return "Sibling";
89
90 if (p->type == PEER_MULTICAST)
91 return "Multicast Group";
92
93 return "Parent";
94 }
95
96 CachePeer *
97 whichPeer(const Ip::Address &from)
98 {
99 int j;
100
101 CachePeer *p = nullptr;
102 debugs(15, 3, "whichPeer: from " << from);
103
104 for (p = Config.peers; p; p = p->next) {
105 for (j = 0; j < p->n_addresses; ++j) {
106 if (from == p->addresses[j] && from.port() == p->icp.port) {
107 return p;
108 }
109 }
110 }
111
112 return nullptr;
113 }
114
115 peer_t
116 neighborType(const CachePeer * p, const AnyP::Uri &url)
117 {
118
119 const NeighborTypeDomainList *d = nullptr;
120
121 for (d = p->typelist; d; d = d->next) {
122 if (0 == matchDomainName(url.host(), d->domain))
123 if (d->type != PEER_NONE)
124 return d->type;
125 }
126 #if PEER_MULTICAST_SIBLINGS
127 if (p->type == PEER_MULTICAST)
128 if (p->options.mcast_siblings)
129 return PEER_SIBLING;
130 #endif
131
132 return p->type;
133 }
134
135 /**
136 * \return Whether it is appropriate to fetch REQUEST from PEER.
137 */
138 bool
139 peerAllowedToUse(const CachePeer * p, PeerSelector * ps)
140 {
141 assert(ps);
142 HttpRequest *request = ps->request;
143 assert(request != nullptr);
144
145 if (neighborType(p, request->url) == PEER_SIBLING) {
146 #if PEER_MULTICAST_SIBLINGS
147 if (p->type == PEER_MULTICAST && p->options.mcast_siblings &&
148 (request->flags.noCache || request->flags.refresh || request->flags.loopDetected || request->flags.needValidation))
149 debugs(15, 2, "multicast-siblings optimization match for " << *p << ", " << request->url.authority());
150 #endif
151 if (request->flags.noCache)
152 return false;
153
154 if (request->flags.refresh)
155 return false;
156
157 if (request->flags.loopDetected)
158 return false;
159
160 if (request->flags.needValidation)
161 return false;
162 }
163
164 // CONNECT requests are proxy requests. Not to be forwarded to origin servers.
165 // Unless the destination port matches, in which case we MAY perform a 'DIRECT' to this CachePeer.
166 if (p->options.originserver && request->method == Http::METHOD_CONNECT && request->url.port() != p->http_port)
167 return false;
168
169 if (p->access == nullptr)
170 return true;
171
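    // Evaluate this peer's cache_peer_access ACLs against the request, giving
    // the check access to the transaction's AccessLogEntry and any reply that
    // is already known; the peer is usable only if the fast check allows it.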
172 ACLFilledChecklist checklist(p->access, request, nullptr);
173 checklist.al = ps->al;
174 if (ps->al && ps->al->reply) {
175 checklist.reply = ps->al->reply.getRaw();
176 HTTPMSGLOCK(checklist.reply);
177 }
178 checklist.syncAle(request, nullptr);
179 return checklist.fastCheck().allowed();
180 }
181
182 /* Return TRUE if it is okay to send an ICP request to this CachePeer. */
183 static int
184 peerWouldBePinged(const CachePeer * p, PeerSelector * ps)
185 {
186 assert(ps);
187 HttpRequest *request = ps->request;
188
189 if (p->icp.port == 0)
190 return 0;
191
192 if (p->options.no_query)
193 return 0;
194
195 if (p->options.mcast_responder)
196 return 0;
197
198 if (p->n_addresses == 0)
199 return 0;
200
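    // Peers marked background-ping are only queried at the configured
    // background ping rate, not on every request.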
201 if (p->options.background_ping && (squid_curtime - p->stats.last_query < Config.backgroundPingRate))
202 return 0;
203
204 /* the case below seems strange, but can happen if the
205 * URL host is on the other side of a firewall */
206 if (p->type == PEER_SIBLING)
207 if (!request->flags.hierarchical)
208 return 0;
209
210 if (!peerAllowedToUse(p, ps))
211 return 0;
212
213 /* Ping dead peers every timeout interval */
214 if (squid_curtime - p->stats.last_query > Config.Timeout.deadPeer)
215 return 1;
216
217 if (!neighborUp(p))
218 return 0;
219
220 return 1;
221 }
222
223 bool
224 peerCanOpenMore(const CachePeer *p)
225 {
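    // A non-positive max-conn setting means "no explicit limit"; fall back to
    // the process file-descriptor ceiling (Squid_MaxFD) as the effective cap.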
226 const int effectiveLimit = p->max_conn <= 0 ? Squid_MaxFD : p->max_conn;
227 const int remaining = effectiveLimit - p->stats.conn_open;
228 debugs(15, 7, remaining << '=' << effectiveLimit << '-' << p->stats.conn_open);
229 return remaining > 0;
230 }
231
232 bool
233 peerHasConnAvailable(const CachePeer *p)
234 {
235 // Standby connections can be used without opening new connections.
236 const int standbys = p->standby.pool ? p->standby.pool->count() : 0;
237
238 // XXX: Some idle pconns can be used without opening new connections.
239 // Complication: Idle pconns cannot be reused for some requests.
240 const int usableIdles = 0;
241
242 const int available = standbys + usableIdles;
243 debugs(15, 7, available << '=' << standbys << '+' << usableIdles);
244 return available > 0;
245 }
246
247 void
248 peerConnClosed(CachePeer *p)
249 {
250 --p->stats.conn_open;
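    // If the standby pool was waiting for a connection slot to free up,
    // let PeerPoolMgr resume filling it now that one has closed.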
251 if (p->standby.waitingForClose && peerCanOpenMore(p)) {
252 p->standby.waitingForClose = false;
253 PeerPoolMgr::Checkpoint(p->standby.mgr, "conn closed");
254 }
255 }
256
257 /* Return TRUE if it is okay to send an HTTP request to this CachePeer. */
258 int
259 peerHTTPOkay(const CachePeer * p, PeerSelector * ps)
260 {
261 if (!peerCanOpenMore(p) && !peerHasConnAvailable(p))
262 return 0;
263
264 if (!peerAllowedToUse(p, ps))
265 return 0;
266
267 if (!neighborUp(p))
268 return 0;
269
270 return 1;
271 }
272
273 int
274 neighborsCount(PeerSelector *ps)
275 {
276 CachePeer *p = nullptr;
277 int count = 0;
278
279 for (p = Config.peers; p; p = p->next)
280 if (peerWouldBePinged(p, ps))
281 ++count;
282
283 debugs(15, 3, "neighborsCount: " << count);
284
285 return count;
286 }
287
288 CachePeer *
289 getFirstUpParent(PeerSelector *ps)
290 {
291 assert(ps);
292 HttpRequest *request = ps->request;
293
294 CachePeer *p = nullptr;
295
296 for (p = Config.peers; p; p = p->next) {
297 if (!neighborUp(p))
298 continue;
299
300 if (neighborType(p, request->url) != PEER_PARENT)
301 continue;
302
303 if (!peerHTTPOkay(p, ps))
304 continue;
305
306 break;
307 }
308
309 debugs(15, 3, "returning " << RawPointer(p).orNil());
310 return p;
311 }
312
313 CachePeer *
314 getRoundRobinParent(PeerSelector *ps)
315 {
316 assert(ps);
317 HttpRequest *request = ps->request;
318
319 CachePeer *p;
320 CachePeer *q = nullptr;
321
322 for (p = Config.peers; p; p = p->next) {
323 if (!p->options.roundrobin)
324 continue;
325
326 if (neighborType(p, request->url) != PEER_PARENT)
327 continue;
328
329 if (!peerHTTPOkay(p, ps))
330 continue;
331
332 if (p->weight == 0)
333 continue;
334
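        // Keep the candidate with the lowest weight-normalized rr_count. For
        // example, a weight=2 parent keeps winning until it has served roughly
        // twice as many requests as a weight=1 parent.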
335 if (q) {
336 if (p->weight == q->weight) {
337 if (q->rr_count < p->rr_count)
338 continue;
339 } else if ( ((double) q->rr_count / q->weight) < ((double) p->rr_count / p->weight)) {
340 continue;
341 }
342 }
343
344 q = p;
345 }
346
347 if (q)
348 ++ q->rr_count;
349
350 debugs(15, 3, "returning " << RawPointer(q).orNil());
351
352 return q;
353 }
354
355 CachePeer *
356 getWeightedRoundRobinParent(PeerSelector *ps)
357 {
358 assert(ps);
359 HttpRequest *request = ps->request;
360
361 CachePeer *p;
362 CachePeer *q = nullptr;
363 int weighted_rtt;
364
365 for (p = Config.peers; p; p = p->next) {
366 if (!p->options.weighted_roundrobin)
367 continue;
368
369 if (neighborType(p, request->url) != PEER_PARENT)
370 continue;
371
372 if (!peerHTTPOkay(p, ps))
373 continue;
374
375 if (q && q->rr_count < p->rr_count)
376 continue;
377
378 q = p;
379 }
380
381 if (q && q->rr_count > 1000000)
382 for (p = Config.peers; p; p = p->next) {
383 if (!p->options.weighted_roundrobin)
384 continue;
385
386 if (neighborType(p, request->url) != PEER_PARENT)
387 continue;
388
389 p->rr_count = 0;
390 }
391
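    // Charge the chosen parent a weighted RTT so that closer (lower RTT) and
    // heavier (higher weight) parents accumulate rr_count more slowly and are
    // therefore selected more often. E.g. rtt=120ms, basetime=20ms, weight=4
    // gives weighted_rtt = (120-20)/4 = 25.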
392 if (q) {
393 weighted_rtt = (q->stats.rtt - q->basetime) / q->weight;
394
395 if (weighted_rtt < 1)
396 weighted_rtt = 1;
397
398 q->rr_count += weighted_rtt;
399
400 debugs(15, 3, "getWeightedRoundRobinParent: weighted_rtt " << weighted_rtt);
401 }
402
403 debugs(15, 3, "returning " << RawPointer(q).orNil());
404 return q;
405 }
406
407 /**
408 * This gets called every 5 minutes to clear the round-robin counter.
409  * The exact timing is an arbitrary default, based on an estimate of how long
410  * a high-performance environment takes to serve a large number of requests
411  * during that period. The more requests served between counter resets, the
412  * better balanced the load becomes.
413 *
414 * \param data unused
415 *
416 * TODO: Make the reset timing a selectable parameter in squid.conf
417 */
418 static void
419 peerClearRRLoop(void *data)
420 {
421 peerClearRR();
422 eventAdd("peerClearRR", peerClearRRLoop, data, 5 * 60.0, 0);
423 }
424
425 /**
426 * This gets called on startup and restart to kick off the CachePeer round-robin
427  * maintenance event. It ensures that no matter how many times it is called,
428  * no more than one event is scheduled.
429 */
430 void
431 peerClearRRStart(void)
432 {
433 static bool event_added = false;
434 if (!event_added) {
435 peerClearRRLoop(nullptr);
436 event_added=true;
437 }
438 }
439
440 /**
441 * Called whenever the round-robin counters need to be reset to a sane state.
442 * So far those times are:
443 * - On startup and reconfigure - to set the counters to sane initial settings.
444  * - When a CachePeer revives from being dead, to prevent the revived CachePeer from
445  *   being flooded with requests which it 'missed' during the down period.
446 */
447 void
448 peerClearRR()
449 {
450 CachePeer *p = nullptr;
451 for (p = Config.peers; p; p = p->next) {
452 p->rr_count = 1;
453 }
454 }
455
456 void
457 peerAlive(CachePeer *p)
458 {
459 if (p->stats.logged_state == PEER_DEAD && p->tcp_up) {
460 debugs(15, DBG_IMPORTANT, "Detected REVIVED " << neighborTypeStr(p) << ": " << *p);
461 p->stats.logged_state = PEER_ALIVE;
462 peerClearRR();
463 if (p->standby.mgr.valid())
464 PeerPoolMgr::Checkpoint(p->standby.mgr, "revived peer");
465 }
466
467 p->stats.last_reply = squid_curtime;
468 p->stats.probe_start = 0;
469
470 // TODO: Remove or explain how we could detect an alive peer without IP addresses
471 if (!p->n_addresses)
472 ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
473 }
474
475 CachePeer *
476 getDefaultParent(PeerSelector *ps)
477 {
478 assert(ps);
479 HttpRequest *request = ps->request;
480
481 CachePeer *p = nullptr;
482
483 for (p = Config.peers; p; p = p->next) {
484 if (neighborType(p, request->url) != PEER_PARENT)
485 continue;
486
487 if (!p->options.default_parent)
488 continue;
489
490 if (!peerHTTPOkay(p, ps))
491 continue;
492
493 debugs(15, 3, "returning " << *p);
494
495 return p;
496 }
497
498 // TODO: Refactor similar get*() functions to use our return/reporting style
499 debugs(15, 3, "none found");
500 return nullptr;
501 }
502
503 CachePeer *
504 getNextPeer(CachePeer * p)
505 {
506 return p->next;
507 }
508
509 CachePeer *
510 getFirstPeer(void)
511 {
512 return Config.peers;
513 }
514
515 static void
516 neighborRemove(CachePeer * target)
517 {
518 CachePeer *p = nullptr;
519 CachePeer **P = nullptr;
520 p = Config.peers;
521 P = &Config.peers;
522
523 while (p) {
524 if (target == p)
525 break;
526
527 P = &p->next;
528
529 p = p->next;
530 }
531
532 if (p) {
533 *P = p->next;
534 p->next = nullptr;
535 delete p;
536 --Config.npeers;
537 }
538
539 first_ping = Config.peers;
540 }
541
542 static void
543 neighborsRegisterWithCacheManager()
544 {
545 Mgr::RegisterAction("server_list",
546 "Peer Cache Statistics",
547 neighborDumpPeers, 0, 1);
548
549 if (Comm::IsConnOpen(icpIncomingConn)) {
550 Mgr::RegisterAction("non_peers",
551 "List of Unknown sites sending ICP messages",
552 neighborDumpNonPeers, 0, 1);
553 }
554 }
555
556 void
557 neighbors_init(void)
558 {
559 struct servent *sep = nullptr;
560 const char *me = getMyHostname();
561 CachePeer *thisPeer = nullptr;
562 CachePeer *next = nullptr;
563
564 neighborsRegisterWithCacheManager();
565
566 if (Comm::IsConnOpen(icpIncomingConn)) {
567
568 for (thisPeer = Config.peers; thisPeer; thisPeer = next) {
569 next = thisPeer->next;
570
571 if (0 != strcmp(thisPeer->host, me))
572 continue;
573
574 for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
575 if (thisPeer->http_port != s->s.port())
576 continue;
577
578 debugs(15, DBG_IMPORTANT, "WARNING: Peer looks like this host." <<
579 Debug::Extra << "Ignoring cache_peer " << *thisPeer);
580
581 neighborRemove(thisPeer);
582 }
583 }
584 }
585
586 peerRefreshDNS((void *) 1);
587
588 sep = getservbyname("echo", "udp");
589 echo_port = sep ? ntohs((unsigned short) sep->s_port) : 7;
590
591 first_ping = Config.peers;
592 }
593
594 int
595 neighborsUdpPing(HttpRequest * request,
596 StoreEntry * entry,
597 IRCB * callback,
598 PeerSelector *ps,
599 int *exprep,
600 int *timeout)
601 {
602 const char *url = entry->url();
603 MemObject *mem = entry->mem_obj;
604 CachePeer *p = nullptr;
605 int i;
606 int reqnum = 0;
607 int flags;
608 int queries_sent = 0;
609 int peers_pinged = 0;
610 int parent_timeout = 0, parent_exprep = 0;
611 int sibling_timeout = 0, sibling_exprep = 0;
612 int mcast_timeout = 0, mcast_exprep = 0;
613
614 if (Config.peers == nullptr)
615 return 0;
616
617 assert(!entry->hasDisk());
618
619 mem->start_ping = current_time;
620
621 mem->ping_reply_callback = callback;
622
623 mem->ircb_data = ps;
624
625 reqnum = icpSetCacheKey((const cache_key *)entry->key);
626
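    // Walk every configured peer exactly once, starting at first_ping and
    // wrapping around the list, so the ping order rotates across requests.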
627 for (i = 0, p = first_ping; i++ < Config.npeers; p = p->next) {
628 if (p == nullptr)
629 p = Config.peers;
630
631 debugs(15, 5, "candidate: " << *p);
632
633 if (!peerWouldBePinged(p, ps))
634 continue; /* next CachePeer */
635
636 ++peers_pinged;
637
638 debugs(15, 4, "pinging cache_peer " << *p << " for '" << url << "'");
639
640 debugs(15, 3, "neighborsUdpPing: key = '" << entry->getMD5Text() << "'");
641
642 debugs(15, 3, "neighborsUdpPing: reqnum = " << reqnum);
643
644 #if USE_HTCP
645 if (p->options.htcp && !p->options.htcp_only_clr) {
646 if (Config.Port.htcp <= 0) {
647 debugs(15, DBG_CRITICAL, "ERROR: HTCP is disabled! Cannot send HTCP request to peer.");
648 continue;
649 }
650
651 debugs(15, 3, "neighborsUdpPing: sending HTCP query");
652 if (htcpQuery(entry, request, p) <= 0)
653 continue; // unable to send.
654 } else
655 #endif
656 {
657 if (Config.Port.icp <= 0 || !Comm::IsConnOpen(icpOutgoingConn)) {
658 debugs(15, DBG_CRITICAL, "ERROR: ICP is disabled! Cannot send ICP request to peer.");
659 continue;
660 } else {
661
662 if (p->type == PEER_MULTICAST)
663 mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);
664
665 if (p->icp.port == echo_port) {
666 debugs(15, 4, "neighborsUdpPing: Looks like a dumb cache, send DECHO ping");
667 // TODO: Get ALE from callback_data if possible.
668 icpCreateAndSend(ICP_DECHO, 0, url, reqnum, 0,
669 icpOutgoingConn->fd, p->in_addr, nullptr);
670 } else {
671 flags = 0;
672
673 if (Config.onoff.query_icmp)
674 if (p->icp.version == ICP_VERSION_2)
675 flags |= ICP_FLAG_SRC_RTT;
676
677 // TODO: Get ALE from callback_data if possible.
678 icpCreateAndSend(ICP_QUERY, flags, url, reqnum, 0,
679 icpOutgoingConn->fd, p->in_addr, nullptr);
680 }
681 }
682 }
683
684 ++queries_sent;
685
686 ++ p->stats.pings_sent;
687
688 if (p->type == PEER_MULTICAST) {
689 mcast_exprep += p->mcast.n_replies_expected;
690 mcast_timeout += (p->stats.rtt * p->mcast.n_replies_expected);
691 } else if (neighborUp(p)) {
692             /* it's alive, expect a reply from it */
693
694 if (neighborType(p, request->url) == PEER_PARENT) {
695 ++parent_exprep;
696 parent_timeout += p->stats.rtt;
697 } else {
698 ++sibling_exprep;
699 sibling_timeout += p->stats.rtt;
700 }
701 } else {
702 /* Neighbor is dead; ping it anyway, but don't expect a reply */
703 /* log it once at the threshold */
704
705 if (p->stats.logged_state == PEER_ALIVE) {
706 debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << *p);
707 p->stats.logged_state = PEER_DEAD;
708 }
709 }
710
711 p->stats.last_query = squid_curtime;
712
713 /*
714 * keep probe_start == 0 for a multicast CachePeer,
715 * so neighborUp() never says this CachePeer is dead.
716 */
717
718 if ((p->type != PEER_MULTICAST) && (p->stats.probe_start == 0))
719 p->stats.probe_start = squid_curtime;
720 }
721
722 if ((first_ping = first_ping->next) == nullptr)
723 first_ping = Config.peers;
724
725 /*
726 * How many replies to expect?
727 */
728 *exprep = parent_exprep + sibling_exprep + mcast_exprep;
729
730 /*
731 * If there is a configured timeout, use it
732 */
733 if (Config.Timeout.icp_query)
734 *timeout = Config.Timeout.icp_query;
735 else {
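        // No configured timeout: derive one from recent peer RTTs, using twice
        // the mean RTT of the peer class we expect replies from (parents
        // preferred, then multicast, then siblings), with a 2-second default
        // when no replies are expected, capped by Config.Timeout.icp_query_max
        // (when set) and raised to at least Config.Timeout.icp_query_min.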
736 if (*exprep > 0) {
737 if (parent_exprep)
738 *timeout = 2 * parent_timeout / parent_exprep;
739 else if (mcast_exprep)
740 *timeout = 2 * mcast_timeout / mcast_exprep;
741 else
742 *timeout = 2 * sibling_timeout / sibling_exprep;
743 } else
744 *timeout = 2000; /* 2 seconds */
745
746 if (Config.Timeout.icp_query_max)
747 if (*timeout > Config.Timeout.icp_query_max)
748 *timeout = Config.Timeout.icp_query_max;
749
750 if (*timeout < Config.Timeout.icp_query_min)
751 *timeout = Config.Timeout.icp_query_min;
752 }
753
754 return peers_pinged;
755 }
756
757 /* lookup the digest of a given CachePeer */
758 lookup_t
759 peerDigestLookup(CachePeer * p, PeerSelector * ps)
760 {
761 #if USE_CACHE_DIGESTS
762 assert(ps);
763 HttpRequest *request = ps->request;
764 const cache_key *key = request ? storeKeyPublicByRequest(request) : nullptr;
765 assert(p);
766 assert(request);
767 debugs(15, 5, "cache_peer " << *p);
768 /* does the peeer have a valid digest? */
769
770 if (!p->digest) {
771 debugs(15, 5, "peerDigestLookup: gone!");
772 return LOOKUP_NONE;
773 } else if (!peerHTTPOkay(p, ps)) {
774 debugs(15, 5, "peerDigestLookup: !peerHTTPOkay");
775 return LOOKUP_NONE;
776 } else if (!p->digest->flags.needed) {
777 debugs(15, 5, "peerDigestLookup: note need");
778 peerDigestNeeded(p->digest);
779 return LOOKUP_NONE;
780 } else if (!p->digest->flags.usable) {
781 debugs(15, 5, "peerDigestLookup: !ready && " << (p->digest->flags.requested ? "" : "!") << "requested");
782 return LOOKUP_NONE;
783 }
784
785 debugs(15, 5, "OK to lookup cache_peer " << *p);
786 assert(p->digest->cd);
787 /* does digest predict a hit? */
788
789 if (!p->digest->cd->contains(key))
790 return LOOKUP_MISS;
791
792 debugs(15, 5, "HIT for cache_peer " << *p);
793
794 return LOOKUP_HIT;
795 #else
796 (void)p;
797 (void)ps;
798 #endif
799
800 return LOOKUP_NONE;
801 }
802
803 /* select best CachePeer based on cache digests */
804 CachePeer *
805 neighborsDigestSelect(PeerSelector *ps)
806 {
807 CachePeer *best_p = nullptr;
808 #if USE_CACHE_DIGESTS
809 assert(ps);
810 HttpRequest *request = ps->request;
811
812 int best_rtt = 0;
813 int choice_count = 0;
814 int ichoice_count = 0;
815 CachePeer *p;
816 int p_rtt;
817 int i;
818
819 if (!request->flags.hierarchical)
820 return nullptr;
821
822 storeKeyPublicByRequest(request);
823
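    // Scan all peers for digest hits: the first hit becomes the initial best,
    // and later hits replace it only when netdb reports a known, strictly
    // lower RTT for them.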
824 for (i = 0, p = first_ping; i++ < Config.npeers; p = p->next) {
825 lookup_t lookup;
826
827 if (!p)
828 p = Config.peers;
829
830 if (i == 1)
831 first_ping = p;
832
833 lookup = peerDigestLookup(p, ps);
834
835 if (lookup == LOOKUP_NONE)
836 continue;
837
838 ++choice_count;
839
840 if (lookup == LOOKUP_MISS)
841 continue;
842
843 p_rtt = netdbHostRtt(p->host);
844
845 debugs(15, 5, "cache_peer " << *p << " rtt: " << p_rtt);
846
847 /* is this CachePeer better than others in terms of rtt ? */
848 if (!best_p || (p_rtt && p_rtt < best_rtt)) {
849 best_p = p;
850 best_rtt = p_rtt;
851
852 if (p_rtt) /* informative choice (aka educated guess) */
853 ++ichoice_count;
854
855 debugs(15, 4, "cache_peer " << *p << " leads with rtt " << best_rtt);
856 }
857 }
858
859 debugs(15, 4, "neighborsDigestSelect: choices: " << choice_count << " (" << ichoice_count << ")");
860 peerNoteDigestLookup(request, best_p,
861 best_p ? LOOKUP_HIT : (choice_count ? LOOKUP_MISS : LOOKUP_NONE));
862 request->hier.n_choices = choice_count;
863 request->hier.n_ichoices = ichoice_count;
864 #else
865 (void)ps;
866 #endif
867
868 return best_p;
869 }
870
871 void
872 peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup)
873 {
874 #if USE_CACHE_DIGESTS
875 if (p)
876 strncpy(request->hier.cd_host, p->host, sizeof(request->hier.cd_host)-1);
877 else
878 *request->hier.cd_host = '\0';
879
880 request->hier.cd_lookup = lookup;
881 debugs(15, 4, "cache_peer " << RawPointer(p).orNil() << ", lookup: " << lookup_t_str[lookup]);
882 #else
883 (void)request;
884 (void)p;
885 (void)lookup;
886 #endif
887 }
888
889 static void
890 neighborAlive(CachePeer * p, const MemObject *, const icp_common_t * header)
891 {
892 peerAlive(p);
893 ++ p->stats.pings_acked;
894
895 if ((icp_opcode) header->opcode <= ICP_END)
896 ++ p->icp.counts[header->opcode];
897
898 p->icp.version = (int) header->version;
899 }
900
901 static void
902 neighborUpdateRtt(CachePeer * p, MemObject * mem)
903 {
904 int rtt, rtt_av_factor;
905
906 if (!mem)
907 return;
908
909 if (!mem->start_ping.tv_sec)
910 return;
911
912 rtt = tvSubMsec(mem->start_ping, current_time);
913
914 if (rtt < 1 || rtt > 10000)
915 return;
916
917 rtt_av_factor = RTT_AV_FACTOR;
918
919 if (p->options.weighted_roundrobin)
920 rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;
921
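    // Fold this sample into the peer's running RTT average; weighted-round-robin
    // peers use the RTT_BACKGROUND_AV_FACTOR smoothing factor instead.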
922 p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, p->stats.pings_acked, rtt_av_factor);
923 }
924
925 #if USE_HTCP
926 static void
927 neighborAliveHtcp(CachePeer * p, const MemObject *, const HtcpReplyData * htcp)
928 {
929 peerAlive(p);
930 ++ p->stats.pings_acked;
931 ++ p->htcp.counts[htcp->hit ? 1 : 0];
932 p->htcp.version = htcp->version;
933 }
934
935 #endif
936
937 static void
938 neighborCountIgnored(CachePeer * p)
939 {
940 if (p == nullptr)
941 return;
942
943 ++ p->stats.ignored_replies;
944
945 ++NLateReplies;
946 }
947
948 static CachePeer *non_peers = nullptr;
949
950 static void
951 neighborIgnoreNonPeer(const Ip::Address &from, icp_opcode opcode)
952 {
953 CachePeer *np;
954
955 for (np = non_peers; np; np = np->next) {
956 if (np->in_addr != from)
957 continue;
958
959 if (np->in_addr.port() != from.port())
960 continue;
961
962 break;
963 }
964
965 if (np == nullptr) {
966 char fromStr[MAX_IPSTRLEN];
967 from.toStr(fromStr, sizeof(fromStr));
968 np = new CachePeer(fromStr);
969 np->in_addr = from;
970 np->icp.port = from.port();
971 np->type = PEER_NONE;
972 np->next = non_peers;
973 non_peers = np;
974 }
975
976 ++ np->icp.counts[opcode];
977
978 if (isPowTen(++np->stats.ignored_replies))
979 debugs(15, DBG_IMPORTANT, "WARNING: Ignored " << np->stats.ignored_replies << " replies from non-peer " << *np);
980 }
981
982 /* ignoreMulticastReply
983 *
984  * We want to ignore replies from multicast peers if the
985  * cache_host_domain rules would normally prevent the CachePeer
986  * from being used
987 */
988 static int
989 ignoreMulticastReply(CachePeer * p, PeerSelector * ps)
990 {
991 if (p == nullptr)
992 return 0;
993
994 if (!p->options.mcast_responder)
995 return 0;
996
997 if (peerHTTPOkay(p, ps))
998 return 0;
999
1000 return 1;
1001 }
1002
1003 /**
1004 * I should attach these records to the entry. We take the first
1005  * hit we get, or wait until everyone misses. The timeout handler
1006 * call needs to nip this shopping list or call one of the misses.
1007 *
1008  * If a hit process is already started, then so be it.
1009 */
1010 void
1011 neighborsUdpAck(const cache_key * key, icp_common_t * header, const Ip::Address &from)
1012 {
1013 CachePeer *p = nullptr;
1014 StoreEntry *entry;
1015 MemObject *mem = nullptr;
1016 peer_t ntype = PEER_NONE;
1017 icp_opcode opcode = (icp_opcode) header->opcode;
1018
1019 debugs(15, 6, "neighborsUdpAck: opcode " << opcode << " '" << storeKeyText(key) << "'");
1020
1021 if ((entry = Store::Root().findCallbackXXX(key)))
1022 mem = entry->mem_obj;
1023
1024 if ((p = whichPeer(from)))
1025 neighborAlive(p, mem, header);
1026
1027 if (opcode > ICP_END)
1028 return;
1029
1030 const char *opcode_d = icp_opcode_str[opcode];
1031
1032 if (p)
1033 neighborUpdateRtt(p, mem);
1034
1035 /* Does the entry exist? */
1036 if (nullptr == entry) {
1037 debugs(12, 3, "neighborsUdpAck: Cache key '" << storeKeyText(key) << "' not found");
1038 neighborCountIgnored(p);
1039 return;
1040 }
1041
1042 /* check if someone is already fetching it */
1043 if (EBIT_TEST(entry->flags, ENTRY_DISPATCHED)) {
1044 debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched.");
1045 neighborCountIgnored(p);
1046 return;
1047 }
1048
1049 if (mem == nullptr) {
1050 debugs(15, 2, "Ignoring " << opcode_d << " for missing mem_obj: " << storeKeyText(key));
1051 neighborCountIgnored(p);
1052 return;
1053 }
1054
1055 if (entry->ping_status != PING_WAITING) {
1056 debugs(15, 2, "neighborsUdpAck: Late " << opcode_d << " for " << storeKeyText(key));
1057 neighborCountIgnored(p);
1058 return;
1059 }
1060
1061 if (!entry->locked()) {
1062 // TODO: many entries are unlocked; why is this reported at level 1?
1063 debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks");
1064 neighborCountIgnored(p);
1065 return;
1066 }
1067
1068 if (!mem->ircb_data) {
1069 debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing ICP callback data for " << *entry);
1070 neighborCountIgnored(p);
1071 return;
1072 }
1073
1074 debugs(15, 3, opcode_d << " for " << storeKeyText(key) << " from " << RawPointer(p).orNil("source"));
1075
1076 if (p) {
1077 ntype = neighborType(p, mem->request->url);
1078 }
1079
1080 if (ignoreMulticastReply(p, mem->ircb_data)) {
1081 neighborCountIgnored(p);
1082 } else if (opcode == ICP_MISS) {
1083 if (p == nullptr) {
1084 neighborIgnoreNonPeer(from, opcode);
1085 } else {
1086 mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
1087 }
1088 } else if (opcode == ICP_HIT) {
1089 if (p == nullptr) {
1090 neighborIgnoreNonPeer(from, opcode);
1091 } else {
1092 header->opcode = ICP_HIT;
1093 mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
1094 }
1095 } else if (opcode == ICP_DECHO) {
1096 if (p == nullptr) {
1097 neighborIgnoreNonPeer(from, opcode);
1098 } else if (ntype == PEER_SIBLING) {
1099 debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n");
1100 debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n");
1101 } else {
1102 mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
1103 }
1104 } else if (opcode == ICP_SECHO) {
1105 if (p) {
1106 debugs(15, DBG_IMPORTANT, "Ignoring SECHO from neighbor " << *p);
1107 neighborCountIgnored(p);
1108 } else {
1109 debugs(15, DBG_IMPORTANT, "Unsolicited SECHO from " << from);
1110 }
1111 } else if (opcode == ICP_DENIED) {
1112 if (p == nullptr) {
1113 neighborIgnoreNonPeer(from, opcode);
1114 } else if (p->stats.pings_acked > 100) {
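        // After at least 100 acknowledged pings, a peer answering more than 95%
        // of them with DENIED is assumed to be misconfigured and is removed.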
1115 if (100 * p->icp.counts[ICP_DENIED] / p->stats.pings_acked > 95) {
1116 debugs(15, DBG_CRITICAL, "Disabling cache_peer " << *p <<
1117 " because over 95% of its replies are UDP_DENIED");
1118 neighborRemove(p);
1119 p = nullptr;
1120 } else {
1121 neighborCountIgnored(p);
1122 }
1123 }
1124 } else if (opcode == ICP_MISS_NOFETCH) {
1125 mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
1126 } else {
1127 debugs(15, DBG_CRITICAL, "ERROR: neighborsUdpAck: Unexpected ICP reply: " << opcode_d);
1128 }
1129 }
1130
1131 CachePeer *
1132 findCachePeerByName(const char * const name)
1133 {
1134 CachePeer *p = nullptr;
1135
1136 for (p = Config.peers; p; p = p->next) {
1137 if (!strcasecmp(name, p->name))
1138 break;
1139 }
1140
1141 return p;
1142 }
1143
1144 int
1145 neighborUp(const CachePeer * p)
1146 {
1147 if (!p->tcp_up) {
1148 // TODO: When CachePeer gets its own CodeContext, pass that context instead of nullptr
1149 CallService(nullptr, [&] {
1150 peerProbeConnect(const_cast<CachePeer*>(p));
1151 });
1152 return 0;
1153 }
1154
1155 /*
1156      * The CachePeer cannot be UP if we don't have any IP addresses
1157 * for it.
1158 */
1159 if (0 == p->n_addresses) {
1160 debugs(15, 8, "DOWN (no-ip): " << *p);
1161 return 0;
1162 }
1163
1164 if (p->options.no_query) {
1165 debugs(15, 8, "UP (no-query): " << *p);
1166 return 1;
1167 }
1168
1169 if (p->stats.probe_start != 0 &&
1170 squid_curtime - p->stats.probe_start > Config.Timeout.deadPeer) {
1171 debugs(15, 8, "DOWN (dead): " << *p);
1172 return 0;
1173 }
1174
1175 debugs(15, 8, "UP: " << *p);
1176 return 1;
1177 }
1178
1179 time_t
1180 positiveTimeout(const time_t timeout)
1181 {
1182 return max(static_cast<time_t>(1), timeout);
1183 }
1184
1185 static void
1186 peerDNSConfigure(const ipcache_addrs *ia, const Dns::LookupDetails &, void *data)
1187 {
1188 // TODO: connections to no-longer valid IP addresses should be
1189 // closed when we can detect such IP addresses.
1190
1191 CachePeer *p = (CachePeer *)data;
1192
1193 if (p->n_addresses == 0) {
1194 debugs(15, Important(29), "Configuring " << neighborTypeStr(p) << " " << *p);
1195
1196 if (p->type == PEER_MULTICAST)
1197 debugs(15, DBG_IMPORTANT, " Multicast TTL = " << p->mcast.ttl);
1198 }
1199
1200 p->n_addresses = 0;
1201
1202 if (ia == nullptr) {
1203 debugs(0, DBG_CRITICAL, "WARNING: DNS lookup for '" << *p << "' failed!");
1204 return;
1205 }
1206
1207 if (ia->empty()) {
1208 debugs(0, DBG_CRITICAL, "WARNING: No IP address found for '" << *p << "'!");
1209 return;
1210 }
1211
1212 for (const auto &ip: ia->goodAndBad()) { // TODO: Consider using just good().
1213 if (p->n_addresses < PEER_MAX_ADDRESSES) {
1214 const auto idx = p->n_addresses++;
1215 p->addresses[idx] = ip;
1216 debugs(15, 2, "--> IP address #" << idx << ": " << p->addresses[idx]);
1217 } else {
1218 debugs(15, 3, "ignoring remaining " << (ia->size() - p->n_addresses) << " ips");
1219 break;
1220 }
1221 }
1222
1223 p->in_addr.setEmpty();
1224 p->in_addr = p->addresses[0];
1225 p->in_addr.port(p->icp.port);
1226
1227 peerProbeConnect(p, true); // detect any died or revived peers ASAP
1228
1229 if (p->type == PEER_MULTICAST)
1230 peerCountMcastPeersSchedule(p, 10);
1231
1232 #if USE_ICMP
1233 if (p->type != PEER_MULTICAST && IamWorkerProcess())
1234 if (!p->options.no_netdb_exchange)
1235 eventAddIsh("netdbExchangeStart", netdbExchangeStart, p, 30.0, 1);
1236 #endif
1237
1238 if (p->standby.mgr.valid())
1239 PeerPoolMgr::Checkpoint(p->standby.mgr, "resolved peer");
1240 }
1241
1242 static void
1243 peerRefreshDNS(void *data)
1244 {
1245 CachePeer *p = nullptr;
1246
1247 if (eventFind(peerRefreshDNS, nullptr))
1248 eventDelete(peerRefreshDNS, nullptr);
1249
1250 if (!data && 0 == stat5minClientRequests()) {
1251 /* no recent client traffic, wait a bit */
1252 eventAddIsh("peerRefreshDNS", peerRefreshDNS, nullptr, 180.0, 1);
1253 return;
1254 }
1255
1256 for (p = Config.peers; p; p = p->next)
1257 ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
1258
1259 /* Reconfigure the peers every hour */
1260 eventAddIsh("peerRefreshDNS", peerRefreshDNS, nullptr, 3600.0, 1);
1261 }
1262
1263 /// whether new TCP probes are currently banned
1264 static bool
1265 peerProbeIsBusy(const CachePeer *p)
1266 {
1267 if (p->testing_now > 0) {
1268 debugs(15, 8, "yes, probing " << p);
1269 return true;
1270 }
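    // Also rate-limit probes to at most one per second per peer.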
1271 if (squid_curtime - p->stats.last_connect_probe == 0) {
1272 debugs(15, 8, "yes, just probed " << p);
1273 return true;
1274 }
1275 return false;
1276 }
1277 /*
1278 * peerProbeConnect will be called on dead peers by neighborUp
1279 */
1280 static void
1281 peerProbeConnect(CachePeer *p, const bool reprobeIfBusy)
1282 {
1283 if (peerProbeIsBusy(p)) {
1284 p->reprobe = reprobeIfBusy;
1285 return;
1286 }
1287 p->reprobe = false;
1288
1289 const auto ctimeout = p->connectTimeout();
1290     /* Probe each of this CachePeer's IP addresses, looking for one that we can connect to. */
1291 for (int i = 0; i < p->n_addresses; ++i) {
1292 Comm::ConnectionPointer conn = new Comm::Connection;
1293 conn->remote = p->addresses[i];
1294 conn->remote.port(p->http_port);
1295 conn->setPeer(p);
1296 getOutgoingAddress(nullptr, conn);
1297
1298 ++ p->testing_now;
1299
1300 AsyncCall::Pointer call = commCbCall(15,3, "peerProbeConnectDone", CommConnectCbPtrFun(peerProbeConnectDone, p));
1301 Comm::ConnOpener *cs = new Comm::ConnOpener(conn, call, ctimeout);
1302 cs->setHost(p->host);
1303 AsyncJob::Start(cs);
1304 }
1305
1306 p->stats.last_connect_probe = squid_curtime;
1307 }
1308
1309 static void
1310 peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int, void *data)
1311 {
1312 CachePeer *p = (CachePeer*)data;
1313
1314 if (status == Comm::OK)
1315 p->noteSuccess();
1316 else
1317 p->noteFailure(Http::scNone);
1318
1319 -- p->testing_now;
1320 conn->close();
1321 // TODO: log this traffic.
1322
1323 if (p->reprobe)
1324 peerProbeConnect(p);
1325 }
1326
1327 static void
1328 peerCountMcastPeersSchedule(CachePeer * p, time_t when)
1329 {
1330 if (p->mcast.flags.count_event_pending)
1331 return;
1332
1333 eventAdd("peerCountMcastPeersStart",
1334 peerCountMcastPeersStart,
1335 p,
1336 (double) when, 1);
1337
1338 p->mcast.flags.count_event_pending = true;
1339 }
1340
1341 static void
1342 peerCountMcastPeersStart(void *data)
1343 {
1344 const auto peer = static_cast<CachePeer*>(data);
1345 CallContextCreator([peer] {
1346 peerCountMcastPeersCreateAndSend(peer);
1347 });
1348 peerCountMcastPeersSchedule(peer, MCAST_COUNT_RATE);
1349 }
1350
1351 /// initiates an ICP transaction to a multicast peer
1352 static void
1353 peerCountMcastPeersCreateAndSend(CachePeer * const p)
1354 {
1355 // XXX: Do not create lots of complex fake objects (while abusing their
1356 // APIs) to pass around a few basic data points like start_ping and ping!
1357 MemObject *mem;
1358 int reqnum;
1359 // TODO: use class AnyP::Uri instead of constructing and re-parsing a string
1360 LOCAL_ARRAY(char, url, MAX_URL);
1361 assert(p->type == PEER_MULTICAST);
1362 p->mcast.flags.count_event_pending = false;
1363 snprintf(url, MAX_URL, "http://");
1364 p->in_addr.toUrl(url+7, MAX_URL -8 );
1365 strcat(url, "/");
1366 const auto mx = MasterXaction::MakePortless<XactionInitiator::initPeerMcast>();
1367 auto *req = HttpRequest::FromUrlXXX(url, mx);
1368 assert(req != nullptr);
1369 const AccessLogEntry::Pointer ale = new AccessLogEntry;
1370 ale->request = req;
1371 CodeContext::Reset(ale);
1372 StoreEntry *fake = storeCreateEntry(url, url, RequestFlags(), Http::METHOD_GET);
1373 const auto psstate = new PeerSelector(nullptr);
1374 psstate->request = req;
1375 HTTPMSGLOCK(psstate->request);
1376 psstate->entry = fake;
1377 psstate->peerCountMcastPeerXXX = cbdataReference(p);
1378 psstate->ping.start = current_time;
1379 psstate->al = ale;
1380 mem = fake->mem_obj;
1381 mem->request = psstate->request;
1382 mem->start_ping = current_time;
1383 mem->ping_reply_callback = peerCountHandleIcpReply;
1384 mem->ircb_data = psstate;
1385 mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);
1386 p->mcast.id = mem->id;
1387 reqnum = icpSetCacheKey((const cache_key *)fake->key);
1388 icpCreateAndSend(ICP_QUERY, 0, url, reqnum, 0,
1389 icpOutgoingConn->fd, p->in_addr, psstate->al);
1390 fake->ping_status = PING_WAITING; // TODO: refactor to use PeerSelector::startPingWaiting()
1391 eventAdd("peerCountMcastPeersDone",
1392 peerCountMcastPeersDone,
1393 psstate,
1394 Config.Timeout.mcast_icp_query / 1000.0, 1);
1395 p->mcast.flags.counting = true;
1396 }
1397
1398 static void
1399 peerCountMcastPeersDone(void *data)
1400 {
1401 const auto psstate = static_cast<PeerSelector*>(data);
1402 CallBack(psstate->al, [psstate] {
1403 peerCountMcastPeersAbort(psstate);
1404 delete psstate;
1405 });
1406 }
1407
1408 /// ends counting of multicast ICP replies
1409 /// to the ICP query initiated by peerCountMcastPeersCreateAndSend()
1410 static void
1411 peerCountMcastPeersAbort(PeerSelector * const psstate)
1412 {
1413 StoreEntry *fake = psstate->entry;
1414
1415 if (cbdataReferenceValid(psstate->peerCountMcastPeerXXX)) {
1416 CachePeer *p = (CachePeer *)psstate->peerCountMcastPeerXXX;
1417 p->mcast.flags.counting = false;
1418 p->mcast.avg_n_members = Math::doubleAverage(p->mcast.avg_n_members, (double) psstate->ping.n_recv, ++p->mcast.n_times_counted, 10);
1419 debugs(15, DBG_IMPORTANT, "Group " << *p << ": " << psstate->ping.n_recv <<
1420 " replies, "<< std::setw(4)<< std::setprecision(2) <<
1421 p->mcast.avg_n_members <<" average, RTT " << p->stats.rtt);
1422 p->mcast.n_replies_expected = (int) p->mcast.avg_n_members;
1423 }
1424
1425 cbdataReferenceDone(psstate->peerCountMcastPeerXXX);
1426
1427 fake->abort(); // sets ENTRY_ABORTED and initiates related cleanup
1428 fake->mem_obj->request = nullptr;
1429 fake->unlock("peerCountMcastPeersDone");
1430 }
1431
1432 static void
1433 peerCountHandleIcpReply(CachePeer * p, peer_t, AnyP::ProtocolType proto, void *, void *data)
1434 {
1435 const auto psstate = static_cast<PeerSelector*>(data);
1436 StoreEntry *fake = psstate->entry;
1437 assert(fake);
1438 MemObject *mem = fake->mem_obj;
1439 assert(mem);
1440 int rtt = tvSubMsec(mem->start_ping, current_time);
1441 assert(proto == AnyP::PROTO_ICP);
1442 ++ psstate->ping.n_recv;
1443 int rtt_av_factor = RTT_AV_FACTOR;
1444
1445 if (p->options.weighted_roundrobin)
1446 rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;
1447
1448 p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, psstate->ping.n_recv, rtt_av_factor);
1449 }
1450
1451 static void
1452 neighborDumpPeers(StoreEntry * sentry)
1453 {
1454 dump_peers(sentry, Config.peers);
1455 }
1456
1457 static void
1458 neighborDumpNonPeers(StoreEntry * sentry)
1459 {
1460 dump_peers(sentry, non_peers);
1461 }
1462
1463 void
1464 dump_peer_options(StoreEntry * sentry, CachePeer * p)
1465 {
1466 if (p->options.proxy_only)
1467 storeAppendPrintf(sentry, " proxy-only");
1468
1469 if (p->options.no_query)
1470 storeAppendPrintf(sentry, " no-query");
1471
1472 if (p->options.background_ping)
1473 storeAppendPrintf(sentry, " background-ping");
1474
1475 if (p->options.no_digest)
1476 storeAppendPrintf(sentry, " no-digest");
1477
1478 if (p->options.default_parent)
1479 storeAppendPrintf(sentry, " default");
1480
1481 if (p->options.roundrobin)
1482 storeAppendPrintf(sentry, " round-robin");
1483
1484 if (p->options.carp)
1485 storeAppendPrintf(sentry, " carp");
1486
1487 #if USE_AUTH
1488 if (p->options.userhash)
1489 storeAppendPrintf(sentry, " userhash");
1490 #endif
1491
1492 if (p->options.sourcehash)
1493 storeAppendPrintf(sentry, " sourcehash");
1494
1495 if (p->options.weighted_roundrobin)
1496 storeAppendPrintf(sentry, " weighted-round-robin");
1497
1498 if (p->options.mcast_responder)
1499 storeAppendPrintf(sentry, " multicast-responder");
1500
1501 #if PEER_MULTICAST_SIBLINGS
1502 if (p->options.mcast_siblings)
1503 storeAppendPrintf(sentry, " multicast-siblings");
1504 #endif
1505
1506 if (p->weight != 1)
1507 storeAppendPrintf(sentry, " weight=%d", p->weight);
1508
1509 if (p->options.closest_only)
1510 storeAppendPrintf(sentry, " closest-only");
1511
1512 #if USE_HTCP
1513 if (p->options.htcp) {
1514 storeAppendPrintf(sentry, " htcp");
1515 if (p->options.htcp_oldsquid || p->options.htcp_no_clr || p->options.htcp_no_purge_clr || p->options.htcp_only_clr) {
1516 bool doneopts = false;
1517 if (p->options.htcp_oldsquid) {
1518 storeAppendPrintf(sentry, "oldsquid");
1519 doneopts = true;
1520 }
1521 if (p->options.htcp_no_clr) {
1522 storeAppendPrintf(sentry, "%sno-clr",(doneopts?",":"="));
1523 doneopts = true;
1524 }
1525 if (p->options.htcp_no_purge_clr) {
1526 storeAppendPrintf(sentry, "%sno-purge-clr",(doneopts?",":"="));
1527 doneopts = true;
1528 }
1529 if (p->options.htcp_only_clr) {
1530 storeAppendPrintf(sentry, "%sonly-clr",(doneopts?",":"="));
1531 //doneopts = true; // uncomment if more opts are added
1532 }
1533 }
1534 }
1535 #endif
1536
1537 if (p->options.no_netdb_exchange)
1538 storeAppendPrintf(sentry, " no-netdb-exchange");
1539
1540 #if USE_DELAY_POOLS
1541 if (p->options.no_delay)
1542 storeAppendPrintf(sentry, " no-delay");
1543 #endif
1544
1545 if (p->login)
1546 storeAppendPrintf(sentry, " login=%s", p->login);
1547
1548 if (p->mcast.ttl > 0)
1549 storeAppendPrintf(sentry, " ttl=%d", p->mcast.ttl);
1550
1551 if (p->connect_timeout_raw > 0)
1552 storeAppendPrintf(sentry, " connect-timeout=%d", (int)p->connect_timeout_raw);
1553
1554 if (p->connect_fail_limit != PEER_TCP_MAGIC_COUNT)
1555 storeAppendPrintf(sentry, " connect-fail-limit=%d", p->connect_fail_limit);
1556
1557 #if USE_CACHE_DIGESTS
1558
1559 if (p->digest_url)
1560 storeAppendPrintf(sentry, " digest-url=%s", p->digest_url);
1561
1562 #endif
1563
1564 if (p->options.allow_miss)
1565 storeAppendPrintf(sentry, " allow-miss");
1566
1567 if (p->options.no_tproxy)
1568 storeAppendPrintf(sentry, " no-tproxy");
1569
1570 if (p->max_conn > 0)
1571 storeAppendPrintf(sentry, " max-conn=%d", p->max_conn);
1572 if (p->standby.limit > 0)
1573 storeAppendPrintf(sentry, " standby=%d", p->standby.limit);
1574
1575 if (p->options.originserver)
1576 storeAppendPrintf(sentry, " originserver");
1577
1578 if (p->domain)
1579 storeAppendPrintf(sentry, " forceddomain=%s", p->domain);
1580
1581 if (p->connection_auth == 0)
1582 storeAppendPrintf(sentry, " connection-auth=off");
1583 else if (p->connection_auth == 1)
1584 storeAppendPrintf(sentry, " connection-auth=on");
1585 else if (p->connection_auth == 2)
1586 storeAppendPrintf(sentry, " connection-auth=auto");
1587
1588 p->secure.dumpCfg(sentry,"tls-");
1589 storeAppendPrintf(sentry, "\n");
1590 }
1591
1592 static void
1593 dump_peers(StoreEntry * sentry, CachePeer * peers)
1594 {
1595 char ntoabuf[MAX_IPSTRLEN];
1596 int i;
1597
1598 if (peers == nullptr)
1599 storeAppendPrintf(sentry, "There are no neighbors installed.\n");
1600
1601 for (CachePeer *e = peers; e; e = e->next) {
1602 assert(e->host != nullptr);
1603 storeAppendPrintf(sentry, "\n%-11.11s: %s\n",
1604 neighborTypeStr(e),
1605 e->name);
1606 storeAppendPrintf(sentry, "Host : %s/%d/%d\n",
1607 e->host,
1608 e->http_port,
1609 e->icp.port);
1610 storeAppendPrintf(sentry, "Flags :");
1611 dump_peer_options(sentry, e);
1612
1613 for (i = 0; i < e->n_addresses; ++i) {
1614 storeAppendPrintf(sentry, "Address[%d] : %s\n", i,
1615 e->addresses[i].toStr(ntoabuf,MAX_IPSTRLEN) );
1616 }
1617
1618 storeAppendPrintf(sentry, "Status : %s\n",
1619 neighborUp(e) ? "Up" : "Down");
1620 storeAppendPrintf(sentry, "FETCHES : %d\n", e->stats.fetches);
1621 storeAppendPrintf(sentry, "OPEN CONNS : %d\n", e->stats.conn_open);
1622 storeAppendPrintf(sentry, "AVG RTT : %d msec\n", e->stats.rtt);
1623
1624 if (!e->options.no_query) {
1625 storeAppendPrintf(sentry, "LAST QUERY : %8d seconds ago\n",
1626 (int) (squid_curtime - e->stats.last_query));
1627
1628 if (e->stats.last_reply > 0)
1629 storeAppendPrintf(sentry, "LAST REPLY : %8d seconds ago\n",
1630 (int) (squid_curtime - e->stats.last_reply));
1631 else
1632 storeAppendPrintf(sentry, "LAST REPLY : none received\n");
1633
1634 storeAppendPrintf(sentry, "PINGS SENT : %8d\n", e->stats.pings_sent);
1635
1636 storeAppendPrintf(sentry, "PINGS ACKED: %8d %3d%%\n",
1637 e->stats.pings_acked,
1638 Math::intPercent(e->stats.pings_acked, e->stats.pings_sent));
1639 }
1640
1641 storeAppendPrintf(sentry, "IGNORED : %8d %3d%%\n", e->stats.ignored_replies, Math::intPercent(e->stats.ignored_replies, e->stats.pings_acked));
1642
1643 if (!e->options.no_query) {
1644 storeAppendPrintf(sentry, "Histogram of PINGS ACKED:\n");
1645 #if USE_HTCP
1646
1647 if (e->options.htcp) {
1648 storeAppendPrintf(sentry, "\tMisses\t%8d %3d%%\n",
1649 e->htcp.counts[0],
1650 Math::intPercent(e->htcp.counts[0], e->stats.pings_acked));
1651 storeAppendPrintf(sentry, "\tHits\t%8d %3d%%\n",
1652 e->htcp.counts[1],
1653 Math::intPercent(e->htcp.counts[1], e->stats.pings_acked));
1654 } else {
1655 #endif
1656
1657 for (auto op : WholeEnum<icp_opcode>()) {
1658 if (e->icp.counts[op] == 0)
1659 continue;
1660
1661 storeAppendPrintf(sentry, " %12.12s : %8d %3d%%\n",
1662 icp_opcode_str[op],
1663 e->icp.counts[op],
1664 Math::intPercent(e->icp.counts[op], e->stats.pings_acked));
1665 }
1666
1667 #if USE_HTCP
1668
1669 }
1670
1671 #endif
1672
1673 }
1674
1675 if (e->stats.last_connect_failure) {
1676 storeAppendPrintf(sentry, "Last failed connect() at: %s\n",
1677 Time::FormatHttpd(e->stats.last_connect_failure));
1678 }
1679
1680 storeAppendPrintf(sentry, "keep-alive ratio: %d%%\n", Math::intPercent(e->stats.n_keepalives_recv, e->stats.n_keepalives_sent));
1681 }
1682 }
1683
1684 #if USE_HTCP
1685 void
1686 neighborsHtcpReply(const cache_key * key, HtcpReplyData * htcp, const Ip::Address &from)
1687 {
1688 StoreEntry *e = Store::Root().findCallbackXXX(key);
1689 MemObject *mem = nullptr;
1690 CachePeer *p;
1691 peer_t ntype = PEER_NONE;
1692 debugs(15, 6, "neighborsHtcpReply: " <<
1693 (htcp->hit ? "HIT" : "MISS") << " " <<
1694 storeKeyText(key) );
1695
1696 if (nullptr != e)
1697 mem = e->mem_obj;
1698
1699 if ((p = whichPeer(from)))
1700 neighborAliveHtcp(p, mem, htcp);
1701
1702 /* Does the entry exist? */
1703 if (nullptr == e) {
1704         debugs(12, 3, "neighborsHtcpReply: Cache key '" << storeKeyText(key) << "' not found");
1705 neighborCountIgnored(p);
1706 return;
1707 }
1708
1709 /* check if someone is already fetching it */
1710 if (EBIT_TEST(e->flags, ENTRY_DISPATCHED)) {
1711         debugs(15, 3, "neighborsHtcpReply: '" << storeKeyText(key) << "' already being fetched.");
1712 neighborCountIgnored(p);
1713 return;
1714 }
1715
1716 if (mem == nullptr) {
1717 debugs(15, 2, "Ignoring reply for missing mem_obj: " << storeKeyText(key));
1718 neighborCountIgnored(p);
1719 return;
1720 }
1721
1722 if (e->ping_status != PING_WAITING) {
1723         debugs(15, 2, "neighborsHtcpReply: Entry " << storeKeyText(key) << " is not PING_WAITING");
1724 neighborCountIgnored(p);
1725 return;
1726 }
1727
1728 if (!e->locked()) {
1729 // TODO: many entries are unlocked; why is this reported at level 1?
1730         debugs(12, DBG_IMPORTANT, "neighborsHtcpReply: '" << storeKeyText(key) << "' has no locks");
1731 neighborCountIgnored(p);
1732 return;
1733 }
1734
1735 if (!mem->ircb_data) {
1736 debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing HTCP callback data for " << *e);
1737 neighborCountIgnored(p);
1738 return;
1739 }
1740
1741 if (p) {
1742 ntype = neighborType(p, mem->request->url);
1743 neighborUpdateRtt(p, mem);
1744 }
1745
1746 if (ignoreMulticastReply(p, mem->ircb_data)) {
1747 neighborCountIgnored(p);
1748 return;
1749 }
1750
1751 debugs(15, 3, "neighborsHtcpReply: e = " << e);
1752 // TODO: Refactor (ping_reply_callback,ircb_data) to add CodeContext.
1753 mem->ping_reply_callback(p, ntype, AnyP::PROTO_HTCP, htcp, mem->ircb_data);
1754 }
1755
1756 /*
1757 * Send HTCP CLR messages to all peers configured to receive them.
1758 */
1759 void
1760 neighborsHtcpClear(StoreEntry * e, HttpRequest * req, const HttpRequestMethod &method, htcp_clr_reason reason)
1761 {
1762 CachePeer *p;
1763 char buf[128];
1764
1765 for (p = Config.peers; p; p = p->next) {
1766 if (!p->options.htcp) {
1767 continue;
1768 }
1769 if (p->options.htcp_no_clr) {
1770 continue;
1771 }
1772 if (p->options.htcp_no_purge_clr && reason == HTCP_CLR_PURGE) {
1773 continue;
1774 }
1775 debugs(15, 3, "neighborsHtcpClear: sending CLR to " << p->in_addr.toUrl(buf, 128));
1776 htcpClear(e, req, method, p, reason);
1777 }
1778 }
1779
1780 #endif
1781