/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 15 Neighbor Routines */

#include "squid.h"
#include "acl/FilledChecklist.h"
#include "anyp/PortCfg.h"
#include "base/EnumIterator.h"
#include "base/IoManip.h"
#include "base/PackableStream.h"
#include "base/PrecomputedCodeContext.h"
#include "CacheDigest.h"
#include "CachePeer.h"
#include "CachePeers.h"
#include "comm/Connection.h"
#include "comm/ConnOpener.h"
#include "debug/Messages.h"
#include "event.h"
#include "FwdState.h"
#include "globals.h"
#include "htcp.h"
#include "HttpRequest.h"
#include "icmp/net_db.h"
#include "ICP.h"
#include "int.h"
#include "ip/Address.h"
#include "ip/tools.h"
#include "ipcache.h"
#include "MemObject.h"
#include "mgr/Registration.h"
#include "multicast.h"
#include "neighbors.h"
#include "NeighborTypeDomainList.h"
#include "pconn.h"
#include "PeerDigest.h"
#include "PeerPoolMgr.h"
#include "PeerSelectState.h"
#include "RequestFlags.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "stat.h"
#include "Store.h"
#include "store_key_md5.h"
#include "tools.h"

/* count mcast group peers every 15 minutes */
#define MCAST_COUNT_RATE 900

bool peerAllowedToUse(const CachePeer *, PeerSelector *);
static int peerWouldBePinged(const CachePeer *, PeerSelector *);
static void neighborAlive(CachePeer *, const MemObject *, const icp_common_t *);
#if USE_HTCP
static void neighborAliveHtcp(CachePeer *, const MemObject *, const HtcpReplyData *);
#endif
static void neighborCountIgnored(CachePeer *);
static void peerDnsRefreshCheck(void *);
static void peerDnsRefreshStart();
static IPH peerDNSConfigure;
static void peerProbeConnect(CachePeer *, const bool reprobeIfBusy = false);
static CNCB peerProbeConnectDone;
static void peerCountMcastPeersDone(void *data);
static void peerCountMcastPeersStart(void *data);
static void peerCountMcastPeersSchedule(CachePeer * p, time_t when);
static void peerCountMcastPeersAbort(PeerSelector *);
static void peerCountMcastPeersCreateAndSend(CachePeer *p);
static IRCB peerCountHandleIcpReply;

static void neighborIgnoreNonPeer(const Ip::Address &, icp_opcode);
static OBJH neighborDumpPeers;
static void dump_peers(StoreEntry *, CachePeers *);

static unsigned short echo_port;

static int NLateReplies = 0;

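/// a human-readable name for the given peer's neighbor relationship (e.g., "Sibling")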
const char *
neighborTypeStr(const CachePeer * p)
{
    if (p->type == PEER_NONE)
        return "Non-Peer";

    if (p->type == PEER_SIBLING)
        return "Sibling";

    if (p->type == PEER_MULTICAST)
        return "Multicast Group";

    return "Parent";
}

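/// locates the configured CachePeer that sent an ICP/HTCP reply from the given
/// address: the IP must be one of the peer's resolved addresses and the port
/// must match the peer's ICP port; returns nil if no peer matches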
CachePeer *
whichPeer(const Ip::Address &from)
{
    int j;

    debugs(15, 3, "whichPeer: from " << from);

    for (const auto &p: CurrentCachePeers()) {
        for (j = 0; j < p->n_addresses; ++j) {
            if (from == p->addresses[j] && from.port() == p->icp.port) {
                return p.get();
            }
        }
    }

    return nullptr;
}

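/// the peer relationship to use for the given URL, honoring any per-domain
/// (neighbor_type_domain) overrides and the multicast-siblings option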
peer_t
neighborType(const CachePeer * p, const AnyP::Uri &url)
{

    const NeighborTypeDomainList *d = nullptr;

    for (d = p->typelist; d; d = d->next) {
        if (0 == matchDomainName(url.host(), d->domain))
            if (d->type != PEER_NONE)
                return d->type;
    }

    if (p->type == PEER_MULTICAST && p->options.mcast_siblings)
        return PEER_SIBLING;

    return p->type;
}

/**
 * \return Whether it is appropriate to fetch REQUEST from PEER.
 */
bool
peerAllowedToUse(const CachePeer * p, PeerSelector * ps)
{
    assert(ps);
    HttpRequest *request = ps->request;
    assert(request != nullptr);

    if (neighborType(p, request->url) == PEER_SIBLING) {
        if (p->type == PEER_MULTICAST && p->options.mcast_siblings &&
                (request->flags.noCache || request->flags.refresh || request->flags.loopDetected || request->flags.needValidation))
            debugs(15, 2, "multicast-siblings optimization match for " << *p << ", " << request->url.authority());

        if (request->flags.noCache)
            return false;

        if (request->flags.refresh)
            return false;

        if (request->flags.loopDetected)
            return false;

        if (request->flags.needValidation)
            return false;
    }

    // CONNECT requests are proxy requests. Not to be forwarded to origin servers.
    // Unless the destination port matches, in which case we MAY perform a 'DIRECT' to this CachePeer.
    if (p->options.originserver && request->method == Http::METHOD_CONNECT && request->url.port() != p->http_port)
        return false;

    if (p->access == nullptr)
        return true;

    ACLFilledChecklist checklist(p->access, request);
    checklist.updateAle(ps->al);
    checklist.syncAle(request, nullptr);
    return checklist.fastCheck().allowed();
}

/* Return TRUE if it is okay to send an ICP request to this CachePeer. */
static int
peerWouldBePinged(const CachePeer * p, PeerSelector * ps)
{
    assert(ps);
    HttpRequest *request = ps->request;

    if (p->icp.port == 0)
        return 0;

    if (p->options.no_query)
        return 0;

    if (p->options.mcast_responder)
        return 0;

    if (p->n_addresses == 0)
        return 0;

    if (p->options.background_ping && (squid_curtime - p->stats.last_query < Config.backgroundPingRate))
        return 0;

    /* the case below seems strange, but can happen if the
     * URL host is on the other side of a firewall */
    if (p->type == PEER_SIBLING)
        if (!request->flags.hierarchical)
            return 0;

    if (!peerAllowedToUse(p, ps))
        return 0;

    /* Ping dead peers every timeout interval */
    if (squid_curtime - p->stats.last_query > Config.Timeout.deadPeer)
        return 1;

    if (!neighborUp(p))
        return 0;

    return 1;
}

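/// whether the peer is still below its connection limit (max-conn or, when no
/// explicit limit is configured, Squid_MaxFD)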
bool
peerCanOpenMore(const CachePeer *p)
{
    const int effectiveLimit = p->max_conn <= 0 ? Squid_MaxFD : p->max_conn;
    const int remaining = effectiveLimit - p->stats.conn_open;
    debugs(15, 7, remaining << '=' << effectiveLimit << '-' << p->stats.conn_open);
    return remaining > 0;
}

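/// whether an already-open connection to the peer (currently only a standby
/// connection) could be used instead of opening a new one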
bool
peerHasConnAvailable(const CachePeer *p)
{
    // Standby connections can be used without opening new connections.
    const int standbys = p->standby.pool ? p->standby.pool->count() : 0;

    // XXX: Some idle pconns can be used without opening new connections.
    // Complication: Idle pconns cannot be reused for some requests.
    const int usableIdles = 0;

    const int available = standbys + usableIdles;
    debugs(15, 7, available << '=' << standbys << '+' << usableIdles);
    return available > 0;
}

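/// notes a closed connection to the peer and, if the standby pool was waiting
/// for capacity to free up, lets PeerPoolMgr replenish it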
void
peerConnClosed(CachePeer *p)
{
    --p->stats.conn_open;
    if (p->standby.waitingForClose && peerCanOpenMore(p)) {
        p->standby.waitingForClose = false;
        PeerPoolMgr::Checkpoint(p->standby.mgr, "conn closed");
    }
}

/* Return TRUE if it is okay to send an HTTP request to this CachePeer. */
int
peerHTTPOkay(const CachePeer * p, PeerSelector * ps)
{
    if (!peerCanOpenMore(p) && !peerHasConnAvailable(p))
        return 0;

    if (!peerAllowedToUse(p, ps))
        return 0;

    if (!neighborUp(p))
        return 0;

    return 1;
}

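/// the number of configured peers that would be pinged for this request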
int
neighborsCount(PeerSelector *ps)
{
    int count = 0;

    for (const auto &p: CurrentCachePeers())
        if (peerWouldBePinged(p.get(), ps))
            ++count;

    debugs(15, 3, "neighborsCount: " << count);

    return count;
}

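/// the first alive parent allowed to handle this request, or nil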
CachePeer *
getFirstUpParent(PeerSelector *ps)
{
    assert(ps);
    HttpRequest *request = ps->request;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (!neighborUp(p))
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        debugs(15, 3, "returning " << *p);
        return p;
    }

    debugs(15, 3, "none found");
    return nullptr;
}

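/// picks the next round-robin parent for this request, preferring the usable
/// parent with the lowest weight-normalized rr_count, then advancing its counter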
CachePeer *
getRoundRobinParent(PeerSelector *ps)
{
    assert(ps);
    HttpRequest *request = ps->request;

    CachePeer *q = nullptr;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();
        if (!p->options.roundrobin)
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        if (p->weight == 0)
            continue;

        if (q) {
            if (p->weight == q->weight) {
                if (q->rr_count < p->rr_count)
                    continue;
            } else if ( ((double) q->rr_count / q->weight) < ((double) p->rr_count / p->weight)) {
                continue;
            }
        }

        q = p;
    }

    if (q)
        ++ q->rr_count;

    debugs(15, 3, "returning " << RawPointer(q).orNil());

    return q;
}

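/// picks a weighted-round-robin parent (the usable parent with the lowest
/// rr_count wins); the winner's counter is advanced by its weighted RTT, and
/// counters are reset once they grow past one million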
CachePeer *
getWeightedRoundRobinParent(PeerSelector *ps)
{
    assert(ps);
    HttpRequest *request = ps->request;

    CachePeer *q = nullptr;
    int weighted_rtt;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (!p->options.weighted_roundrobin)
            continue;

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        if (q && q->rr_count < p->rr_count)
            continue;

        q = p;
    }

    if (q && q->rr_count > 1000000)
        for (const auto &p: CurrentCachePeers()) {
            if (!p->options.weighted_roundrobin)
                continue;

            if (neighborType(p.get(), request->url) != PEER_PARENT)
                continue;

            p->rr_count = 0;
        }

    if (q) {
        weighted_rtt = (q->stats.rtt - q->basetime) / q->weight;

        if (weighted_rtt < 1)
            weighted_rtt = 1;

        q->rr_count += weighted_rtt;

        debugs(15, 3, "getWeightedRoundRobinParent: weighted_rtt " << weighted_rtt);
    }

    debugs(15, 3, "returning " << RawPointer(q).orNil());
    return q;
}

/**
 * This gets called every 5 minutes to clear the round-robin counter.
 * The exact timing is an arbitrary default, chosen to span a large number
 * of requests in a high-performance environment; the more requests handled
 * between cycled resets, the more balanced the load distribution becomes.
 *
 * \param data unused
 *
 * TODO: Make the reset timing a selectable parameter in squid.conf
 */
static void
peerClearRRLoop(void *data)
{
    peerClearRR();
    eventAdd("peerClearRR", peerClearRRLoop, data, 5 * 60.0, 0);
}

/**
 * This gets called on startup and restart to kick off the CachePeer round-robin
 * maintenance event. It ensures that no matter how many times it is called,
 * no more than one event is scheduled.
 */
void
peerClearRRStart(void)
{
    static bool event_added = false;
    if (!event_added) {
        peerClearRRLoop(nullptr);
        event_added = true;
    }
}

/**
 * Called whenever the round-robin counters need to be reset to a sane state.
 * So far those times are:
 * - On startup and reconfigure - to set the counters to sane initial settings.
 * - When a CachePeer revives after being dead, to prevent the revived CachePeer
 *   being flooded with requests which it has 'missed' during the down period.
 */
void
peerClearRR()
{
    for (const auto &p: CurrentCachePeers())
        p->rr_count = 1;
}

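/// marks the peer as responsive: logs a revival if it was considered dead,
/// resets round-robin counters, and updates last-reply/probe bookkeeping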
void
peerAlive(CachePeer *p)
{
    if (p->stats.logged_state == PEER_DEAD && p->tcp_up) {
        debugs(15, DBG_IMPORTANT, "Detected REVIVED " << neighborTypeStr(p) << ": " << *p);
        p->stats.logged_state = PEER_ALIVE;
        peerClearRR();
        if (p->standby.mgr.valid())
            PeerPoolMgr::Checkpoint(p->standby.mgr, "revived peer");
    }

    p->stats.last_reply = squid_curtime;
    p->stats.probe_start = 0;

    // TODO: Remove or explain how we could detect an alive peer without IP addresses
    if (!p->n_addresses)
        ipcache_nbgethostbyname(p->host, peerDNSConfigure, p);
}

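/// the first usable parent configured with the "default" option, or nil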
CachePeer *
getDefaultParent(PeerSelector *ps)
{
    assert(ps);
    HttpRequest *request = ps->request;

    for (const auto &peer: CurrentCachePeers()) {
        const auto p = peer.get();

        if (neighborType(p, request->url) != PEER_PARENT)
            continue;

        if (!p->options.default_parent)
            continue;

        if (!peerHTTPOkay(p, ps))
            continue;

        debugs(15, 3, "returning " << *p);

        return p;
    }

    // TODO: Refactor similar get*() functions to use our return/reporting style
    debugs(15, 3, "none found");
    return nullptr;
}

static void
neighborsRegisterWithCacheManager()
{
    Mgr::RegisterAction("server_list",
                        "Peer Cache Statistics",
                        neighborDumpPeers, 0, 1);
}

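/// module initialization: registers the cache manager action, drops cache_peer
/// entries that appear to point back at this Squid instance, starts periodic
/// peer DNS refresh, and determines the UDP echo port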
void
neighbors_init(void)
{
    struct servent *sep = nullptr;
    const char *me = getMyHostname();

    neighborsRegisterWithCacheManager();

    if (Comm::IsConnOpen(icpIncomingConn)) {
        RawCachePeers peersToRemove;

        for (const auto &thisPeer: CurrentCachePeers()) {
            if (0 != strcmp(thisPeer->host, me))
                continue;

            for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) {
                if (thisPeer->http_port != s->s.port())
                    continue;

                debugs(15, DBG_IMPORTANT, "WARNING: Peer looks like this host." <<
                       Debug::Extra << "Ignoring cache_peer " << *thisPeer);

                peersToRemove.push_back(thisPeer.get());
                break; // avoid warning about (and removing) the same CachePeer twice
            }
        }

        while (peersToRemove.size()) {
            const auto p = peersToRemove.back();
            peersToRemove.pop_back();
            DeleteConfigured(p);
        }
    }

    peerDnsRefreshStart();

    sep = getservbyname("echo", "udp");
    echo_port = sep ? ntohs((unsigned short) sep->s_port) : 7;
}

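/// sends an ICP (or HTCP) query for this request to every peer worth pinging;
/// returns the number of peers pinged and reports, via exprep and timeout, how
/// many replies to expect and how many milliseconds to wait for them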
int
neighborsUdpPing(HttpRequest * request,
                 StoreEntry * entry,
                 IRCB * callback,
                 PeerSelector *ps,
                 int *exprep,
                 int *timeout)
{
    const char *url = entry->url();
    MemObject *mem = entry->mem_obj;
    int reqnum = 0;
    int flags;
    int peers_pinged = 0;
    int parent_timeout = 0, parent_exprep = 0;
    int sibling_timeout = 0, sibling_exprep = 0;
    int mcast_timeout = 0, mcast_exprep = 0;

    if (Config.peers == nullptr)
        return 0;

    assert(!entry->hasDisk());

    mem->start_ping = current_time;

    mem->ping_reply_callback = callback;

    mem->ircb_data = ps;

    reqnum = icpSetCacheKey((const cache_key *)entry->key);

    const auto savedContext = CodeContext::Current();
    for (size_t i = 0; i < Config.peers->size(); ++i) {
        const auto p = &Config.peers->nextPeerToPing(i);

        CodeContext::Reset(p->probeCodeContext);

        debugs(15, 5, "candidate: " << *p);

        if (!peerWouldBePinged(p, ps))
            continue; /* next CachePeer */

        ++peers_pinged;

        debugs(15, 4, "pinging cache_peer " << *p << " for '" << url << "'");

        debugs(15, 3, "neighborsUdpPing: key = '" << entry->getMD5Text() << "'");

        debugs(15, 3, "neighborsUdpPing: reqnum = " << reqnum);

#if USE_HTCP
        if (p->options.htcp && !p->options.htcp_only_clr) {
            if (Config.Port.htcp <= 0) {
                debugs(15, DBG_CRITICAL, "ERROR: HTCP is disabled! Cannot send HTCP request to peer.");
                continue;
            }

            debugs(15, 3, "neighborsUdpPing: sending HTCP query");
            if (htcpQuery(entry, request, p) <= 0)
                continue; // unable to send.
        } else
#endif
        {
            if (Config.Port.icp <= 0 || !Comm::IsConnOpen(icpOutgoingConn)) {
                debugs(15, DBG_CRITICAL, "ERROR: ICP is disabled! Cannot send ICP request to peer.");
                continue;
            } else {

                if (p->type == PEER_MULTICAST)
                    mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);

                if (p->icp.port == echo_port) {
                    debugs(15, 4, "neighborsUdpPing: Looks like a dumb cache, send DECHO ping");
                    // TODO: Get ALE from callback_data if possible.
                    icpCreateAndSend(ICP_DECHO, 0, url, reqnum, 0,
                                     icpOutgoingConn->fd, p->in_addr, nullptr);
                } else {
                    flags = 0;

                    if (Config.onoff.query_icmp)
                        if (p->icp.version == ICP_VERSION_2)
                            flags |= ICP_FLAG_SRC_RTT;

                    // TODO: Get ALE from callback_data if possible.
                    icpCreateAndSend(ICP_QUERY, flags, url, reqnum, 0,
                                     icpOutgoingConn->fd, p->in_addr, nullptr);
                }
            }
        }

        ++ p->stats.pings_sent;

        if (p->type == PEER_MULTICAST) {
            mcast_exprep += p->mcast.n_replies_expected;
            mcast_timeout += (p->stats.rtt * p->mcast.n_replies_expected);
        } else if (neighborUp(p)) {
            /* it's alive, expect a reply from it */

            if (neighborType(p, request->url) == PEER_PARENT) {
                ++parent_exprep;
                parent_timeout += p->stats.rtt;
            } else {
                ++sibling_exprep;
                sibling_timeout += p->stats.rtt;
            }
        } else {
            /* Neighbor is dead; ping it anyway, but don't expect a reply */
            /* log it once at the threshold */

            if (p->stats.logged_state == PEER_ALIVE) {
                debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << *p);
                p->stats.logged_state = PEER_DEAD;
            }
        }

        p->stats.last_query = squid_curtime;

        /*
         * keep probe_start == 0 for a multicast CachePeer,
         * so neighborUp() never says this CachePeer is dead.
         */

        if ((p->type != PEER_MULTICAST) && (p->stats.probe_start == 0))
            p->stats.probe_start = squid_curtime;
    }
    CodeContext::Reset(savedContext);

    /*
     * How many replies to expect?
     */
    *exprep = parent_exprep + sibling_exprep + mcast_exprep;

    /*
     * If there is a configured timeout, use it
     */
    if (Config.Timeout.icp_query)
        *timeout = Config.Timeout.icp_query;
    else {
        if (*exprep > 0) {
            if (parent_exprep)
                *timeout = 2 * parent_timeout / parent_exprep;
            else if (mcast_exprep)
                *timeout = 2 * mcast_timeout / mcast_exprep;
            else
                *timeout = 2 * sibling_timeout / sibling_exprep;
        } else
            *timeout = 2000; /* 2 seconds */

        if (Config.Timeout.icp_query_max)
            if (*timeout > Config.Timeout.icp_query_max)
                *timeout = Config.Timeout.icp_query_max;

        if (*timeout < Config.Timeout.icp_query_min)
            *timeout = Config.Timeout.icp_query_min;
    }

    return peers_pinged;
}

/* lookup the digest of a given CachePeer */
lookup_t
peerDigestLookup(CachePeer * p, PeerSelector * ps)
{
#if USE_CACHE_DIGESTS
    assert(ps);
    HttpRequest *request = ps->request;
    assert(request);

    assert(p);
    debugs(15, 5, "cache_peer " << *p);
    /* does the peer have a valid digest? */

    if (!p->digest) {
        debugs(15, 5, "peerDigestLookup: gone!");
        return LOOKUP_NONE;
    } else if (!peerHTTPOkay(p, ps)) {
        debugs(15, 5, "peerDigestLookup: !peerHTTPOkay");
        return LOOKUP_NONE;
    } else if (!p->digest->flags.needed) {
        debugs(15, 5, "peerDigestLookup: note need");
        peerDigestNeeded(p->digest);
        return LOOKUP_NONE;
    } else if (!p->digest->flags.usable) {
        debugs(15, 5, "peerDigestLookup: !ready && " << (p->digest->flags.requested ? "" : "!") << "requested");
        return LOOKUP_NONE;
    }

    debugs(15, 5, "OK to lookup cache_peer " << *p);
    assert(p->digest->cd);
    /* does digest predict a hit? */

    if (!p->digest->cd->contains(storeKeyPublicByRequest(request)))
        return LOOKUP_MISS;

    debugs(15, 5, "HIT for cache_peer " << *p);

    return LOOKUP_HIT;
#else
    (void)p;
    (void)ps;
#endif

    return LOOKUP_NONE;
}

/* select best CachePeer based on cache digests */
CachePeer *
neighborsDigestSelect(PeerSelector *ps)
{
    CachePeer *best_p = nullptr;
#if USE_CACHE_DIGESTS
    assert(ps);
    HttpRequest *request = ps->request;

    int best_rtt = 0;
    int choice_count = 0;
    int ichoice_count = 0;
    int p_rtt;

    if (!Config.peers)
        return nullptr;

    if (!request->flags.hierarchical)
        return nullptr;

    storeKeyPublicByRequest(request);

    for (size_t i = 0; i < Config.peers->size(); ++i) {
        const auto p = &Config.peers->nextPeerToPing(i);

        const auto lookup = peerDigestLookup(p, ps);

        if (lookup == LOOKUP_NONE)
            continue;

        ++choice_count;

        if (lookup == LOOKUP_MISS)
            continue;

        p_rtt = netdbHostRtt(p->host);

        debugs(15, 5, "cache_peer " << *p << " rtt: " << p_rtt);

        /* is this CachePeer better than others in terms of rtt ? */
        if (!best_p || (p_rtt && p_rtt < best_rtt)) {
            best_p = p;
            best_rtt = p_rtt;

            if (p_rtt) /* informative choice (aka educated guess) */
                ++ichoice_count;

            debugs(15, 4, "cache_peer " << *p << " leads with rtt " << best_rtt);
        }
    }

    debugs(15, 4, "neighborsDigestSelect: choices: " << choice_count << " (" << ichoice_count << ")");
    peerNoteDigestLookup(request, best_p,
                         best_p ? LOOKUP_HIT : (choice_count ? LOOKUP_MISS : LOOKUP_NONE));
    request->hier.n_choices = choice_count;
    request->hier.n_ichoices = ichoice_count;
#else
    (void)ps;
#endif

    return best_p;
}

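/// records the cache digest lookup outcome (peer host and result) in the
/// request's hierarchy details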
void
peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup)
{
#if USE_CACHE_DIGESTS
    if (p)
        strncpy(request->hier.cd_host, p->host, sizeof(request->hier.cd_host)-1);
    else
        *request->hier.cd_host = '\0';

    request->hier.cd_lookup = lookup;
    debugs(15, 4, "cache_peer " << RawPointer(p).orNil() << ", lookup: " << lookup_t_str[lookup]);
#else
    (void)request;
    (void)p;
    (void)lookup;
#endif
}

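/// updates liveness and ICP reply statistics for a peer that just replied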
static void
neighborAlive(CachePeer * p, const MemObject *, const icp_common_t * header)
{
    peerAlive(p);
    ++ p->stats.pings_acked;

    if ((icp_opcode) header->opcode <= ICP_END)
        ++ p->icp.counts[header->opcode];

    p->icp.version = (int) header->version;
}

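/// folds the measured ping round-trip time into the peer's smoothed RTT,
/// ignoring implausible values outside the 1..10000 ms range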
static void
neighborUpdateRtt(CachePeer * p, MemObject * mem)
{
    int rtt, rtt_av_factor;

    if (!mem)
        return;

    if (!mem->start_ping.tv_sec)
        return;

    rtt = tvSubMsec(mem->start_ping, current_time);

    if (rtt < 1 || rtt > 10000)
        return;

    rtt_av_factor = RTT_AV_FACTOR;

    if (p->options.weighted_roundrobin)
        rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;

    p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, p->stats.pings_acked, rtt_av_factor);
}

#if USE_HTCP
static void
neighborAliveHtcp(CachePeer * p, const MemObject *, const HtcpReplyData * htcp)
{
    peerAlive(p);
    ++ p->stats.pings_acked;
    ++ p->htcp.counts[htcp->hit ? 1 : 0];
    p->htcp.version = htcp->version;
}

#endif

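/// counts a reply that was received but not used, per peer and globally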
static void
neighborCountIgnored(CachePeer * p)
{
    if (p == nullptr)
        return;

    ++ p->stats.ignored_replies;

    ++NLateReplies;
}

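/// notes (and occasionally reports) an ICP reply from an address that does not
/// belong to any configured peer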
static void
neighborIgnoreNonPeer(const Ip::Address &from, icp_opcode opcode)
{
    static uint64_t ignoredReplies = 0;
    if (isPowTen(++ignoredReplies)) {
        debugs(15, DBG_IMPORTANT, "WARNING: Ignored " << ignoredReplies << " ICP replies from non-peers" <<
               Debug::Extra << "last seen non-peer source address: " << from <<
               Debug::Extra << "last seen ICP reply opcode: " << icp_opcode_str[opcode]);
    }
}

/* ignoreMulticastReply
 *
 * We want to ignore replies from multicast peers if the
 * cache_host_domain rules would normally prevent the CachePeer
 * from being used
 */
static int
ignoreMulticastReply(CachePeer * p, PeerSelector * ps)
{
    if (p == nullptr)
        return 0;

    if (!p->options.mcast_responder)
        return 0;

    if (peerHTTPOkay(p, ps))
        return 0;

    return 1;
}

/**
 * I should attach these records to the entry. We take the first
 * hit we get or wait until everyone misses. The timeout handler
 * call needs to nip this shopping list or call one of the misses.
 *
 * If a hit process is already started, then so be it.
 */
void
neighborsUdpAck(const cache_key * key, icp_common_t * header, const Ip::Address &from)
{
    CachePeer *p = nullptr;
    StoreEntry *entry;
    MemObject *mem = nullptr;
    peer_t ntype = PEER_NONE;
    icp_opcode opcode = (icp_opcode) header->opcode;

    debugs(15, 6, "neighborsUdpAck: opcode " << opcode << " '" << storeKeyText(key) << "'");

    if ((entry = Store::Root().findCallbackXXX(key)))
        mem = entry->mem_obj;

    if ((p = whichPeer(from)))
        neighborAlive(p, mem, header);

    if (opcode > ICP_END)
        return;

    const char *opcode_d = icp_opcode_str[opcode];

    if (p)
        neighborUpdateRtt(p, mem);

    /* Does the entry exist? */
    if (nullptr == entry) {
        debugs(12, 3, "neighborsUdpAck: Cache key '" << storeKeyText(key) << "' not found");
        neighborCountIgnored(p);
        return;
    }

    /* check if someone is already fetching it */
    if (EBIT_TEST(entry->flags, ENTRY_DISPATCHED)) {
        debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched.");
        neighborCountIgnored(p);
        return;
    }

    if (mem == nullptr) {
        debugs(15, 2, "Ignoring " << opcode_d << " for missing mem_obj: " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (entry->ping_status != PING_WAITING) {
        debugs(15, 2, "neighborsUdpAck: Late " << opcode_d << " for " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (!entry->locked()) {
        // TODO: many entries are unlocked; why is this reported at level 1?
        debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks");
        neighborCountIgnored(p);
        return;
    }

    if (!mem->ircb_data) {
        debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing ICP callback data for " << *entry);
        neighborCountIgnored(p);
        return;
    }

    debugs(15, 3, opcode_d << " for " << storeKeyText(key) << " from " << RawPointer(p).orNil("source"));

    if (p) {
        ntype = neighborType(p, mem->request->url);
    }

    if (ignoreMulticastReply(p, mem->ircb_data)) {
        neighborCountIgnored(p);
    } else if (opcode == ICP_MISS) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_HIT) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else {
            header->opcode = ICP_HIT;
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_DECHO) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (ntype == PEER_SIBLING) {
            debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n");
            debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n");
        } else {
            mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
        }
    } else if (opcode == ICP_SECHO) {
        if (p) {
            debugs(15, DBG_IMPORTANT, "Ignoring SECHO from neighbor " << *p);
            neighborCountIgnored(p);
        } else {
            debugs(15, DBG_IMPORTANT, "Unsolicited SECHO from " << from);
        }
    } else if (opcode == ICP_DENIED) {
        if (p == nullptr) {
            neighborIgnoreNonPeer(from, opcode);
        } else if (p->stats.pings_acked > 100) {
            if (100 * p->icp.counts[ICP_DENIED] / p->stats.pings_acked > 95) {
                debugs(15, DBG_CRITICAL, "Disabling cache_peer " << *p <<
                       " because over 95% of its replies are UDP_DENIED");
                DeleteConfigured(p);
                p = nullptr;
            } else {
                neighborCountIgnored(p);
            }
        }
    } else if (opcode == ICP_MISS_NOFETCH) {
        mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data);
    } else {
        debugs(15, DBG_CRITICAL, "ERROR: neighborsUdpAck: Unexpected ICP reply: " << opcode_d);
    }
}

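/// finds the configured peer with the given cache_peer name (case-insensitive)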
CachePeer *
findCachePeerByName(const char * const name)
{
    for (const auto &p: CurrentCachePeers()) {
        if (!strcasecmp(name, p->name))
            return p.get();
    }
    return nullptr;
}

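/// whether the peer should be treated as alive; also triggers a TCP probe when
/// the peer's TCP link is marked down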
int
neighborUp(const CachePeer * p)
{
    if (!p->tcp_up) {
        CallService(p->probeCodeContext, [&] {
            peerProbeConnect(const_cast<CachePeer*>(p));
        });
        return 0;
    }

    /*
     * The CachePeer can not be UP if we don't have any IP addresses
     * for it.
     */
    if (0 == p->n_addresses) {
        debugs(15, 8, "DOWN (no-ip): " << *p);
        return 0;
    }

    if (p->options.no_query) {
        debugs(15, 8, "UP (no-query): " << *p);
        return 1;
    }

    if (p->stats.probe_start != 0 &&
            squid_curtime - p->stats.probe_start > Config.Timeout.deadPeer) {
        debugs(15, 8, "DOWN (dead): " << *p);
        return 0;
    }

    debugs(15, 8, "UP: " << *p);
    return 1;
}

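/// clamps the given timeout to a minimum of one second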
time_t
positiveTimeout(const time_t timeout)
{
    return max(static_cast<time_t>(1), timeout);
}

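/// ipcache callback: records the peer's freshly resolved addresses (up to
/// PEER_MAX_ADDRESSES), probes the peer, and schedules follow-up work such as
/// multicast peer counting and netdb exchanges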
static void
peerDNSConfigure(const ipcache_addrs *ia, const Dns::LookupDetails &, void *data)
{
    // TODO: connections to no-longer valid IP addresses should be
    // closed when we can detect such IP addresses.

    CachePeer *p = (CachePeer *)data;

    if (p->n_addresses == 0) {
        debugs(15, Important(29), "Configuring " << neighborTypeStr(p) << " " << *p);

        if (p->type == PEER_MULTICAST)
            debugs(15, DBG_IMPORTANT, " Multicast TTL = " << p->mcast.ttl);
    }

    p->n_addresses = 0;

    if (ia == nullptr) {
        debugs(0, DBG_CRITICAL, "WARNING: DNS lookup for '" << *p << "' failed!");
        return;
    }

    if (ia->empty()) {
        debugs(0, DBG_CRITICAL, "WARNING: No IP address found for '" << *p << "'!");
        return;
    }

    for (const auto &ip: ia->goodAndBad()) { // TODO: Consider using just good().
        if (p->n_addresses < PEER_MAX_ADDRESSES) {
            const auto idx = p->n_addresses++;
            p->addresses[idx] = ip;
            debugs(15, 2, "--> IP address #" << idx << ": " << p->addresses[idx]);
        } else {
            debugs(15, 3, "ignoring remaining " << (ia->size() - p->n_addresses) << " ips");
            break;
        }
    }

    p->in_addr.setEmpty();
    p->in_addr = p->addresses[0];
    p->in_addr.port(p->icp.port);

    peerProbeConnect(p, true); // detect any died or revived peers ASAP

    if (p->type == PEER_MULTICAST)
        peerCountMcastPeersSchedule(p, 10);

#if USE_ICMP
    if (p->type != PEER_MULTICAST && IamWorkerProcess())
        if (!p->options.no_netdb_exchange)
            eventAddIsh("netdbExchangeStart", netdbExchangeStart, p, 30.0, 1);
#endif

    if (p->standby.mgr.valid())
        PeerPoolMgr::Checkpoint(p->standby.mgr, "resolved peer");
}

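/// (re)schedules the next peerDnsRefreshCheck() event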
static void
peerScheduleDnsRefreshCheck(const double delayInSeconds)
{
    if (eventFind(peerDnsRefreshCheck, nullptr))
        eventDelete(peerDnsRefreshCheck, nullptr);
    eventAddIsh("peerDnsRefreshCheck", peerDnsRefreshCheck, nullptr, delayInSeconds, 1);
}

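/// periodic check: refreshes peer DNS records if there has been recent client
/// traffic; otherwise retries a few minutes later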
static void
peerDnsRefreshCheck(void *)
{
    if (!statSawRecentRequests()) {
        /* no recent client traffic, wait a bit */
        peerScheduleDnsRefreshCheck(180.0);
        return;
    }

    peerDnsRefreshStart();
}

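/// starts asynchronous DNS lookups for all configured peers and schedules the
/// next refresh check an hour later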
static void
peerDnsRefreshStart()
{
    const auto savedContext = CodeContext::Current();
    for (const auto &p: CurrentCachePeers()) {
        CodeContext::Reset(p->probeCodeContext);
        ipcache_nbgethostbyname(p->host, peerDNSConfigure, p.get());
    }
    CodeContext::Reset(savedContext);

    peerScheduleDnsRefreshCheck(3600.0);
}

/// whether new TCP probes are currently banned
static bool
peerProbeIsBusy(const CachePeer *p)
{
    if (p->testing_now > 0) {
        debugs(15, 8, "yes, probing " << p);
        return true;
    }
    if (squid_curtime - p->stats.last_connect_probe == 0) {
        debugs(15, 8, "yes, just probed " << p);
        return true;
    }
    return false;
}

/*
 * peerProbeConnect will be called on dead peers by neighborUp
 */
static void
peerProbeConnect(CachePeer *p, const bool reprobeIfBusy)
{
    if (peerProbeIsBusy(p)) {
        p->reprobe = reprobeIfBusy;
        return;
    }
    p->reprobe = false;

    const auto ctimeout = p->connectTimeout();
    /* for each IP address of this CachePeer. find one that we can connect to and probe it. */
    for (int i = 0; i < p->n_addresses; ++i) {
        Comm::ConnectionPointer conn = new Comm::Connection;
        conn->remote = p->addresses[i];
        conn->remote.port(p->http_port);
        conn->setPeer(p);
        getOutgoingAddress(nullptr, conn);

        ++ p->testing_now;

        AsyncCall::Pointer call = commCbCall(15, 3, "peerProbeConnectDone", CommConnectCbPtrFun(peerProbeConnectDone, p));
        Comm::ConnOpener *cs = new Comm::ConnOpener(conn, call, ctimeout);
        cs->setHost(p->host);
        AsyncJob::Start(cs);
    }

    p->stats.last_connect_probe = squid_curtime;
}

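/// ConnOpener callback: records probe success or failure, closes the probe
/// connection, and re-probes if another probe was requested meanwhile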
static void
peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int, void *data)
{
    CachePeer *p = (CachePeer*)data;

    if (status == Comm::OK)
        p->noteSuccess();
    else
        p->noteFailure();

    -- p->testing_now;
    conn->close();
    // TODO: log this traffic.

    if (p->reprobe)
        peerProbeConnect(p);
}

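/// schedules a multicast peer counting cycle unless one is already pending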
static void
peerCountMcastPeersSchedule(CachePeer * p, time_t when)
{
    if (p->mcast.flags.count_event_pending)
        return;

    eventAdd("peerCountMcastPeersStart",
             peerCountMcastPeersStart,
             p,
             (double) when, 1);

    p->mcast.flags.count_event_pending = true;
}

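/// timed event: starts one multicast peer counting transaction and schedules
/// the next one MCAST_COUNT_RATE seconds later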
static void
peerCountMcastPeersStart(void *data)
{
    const auto peer = static_cast<CachePeer*>(data);
    CallContextCreator([peer] {
        peerCountMcastPeersCreateAndSend(peer);
    });
    peerCountMcastPeersSchedule(peer, MCAST_COUNT_RATE);
}

/// initiates an ICP transaction to a multicast peer
static void
peerCountMcastPeersCreateAndSend(CachePeer * const p)
{
    // XXX: Do not create lots of complex fake objects (while abusing their
    // APIs) to pass around a few basic data points like start_ping and ping!
    MemObject *mem;
    int reqnum;
    // TODO: use class AnyP::Uri instead of constructing and re-parsing a string
    LOCAL_ARRAY(char, url, MAX_URL);
    assert(p->type == PEER_MULTICAST);
    p->mcast.flags.count_event_pending = false;
    snprintf(url, MAX_URL, "http://");
    p->in_addr.toUrl(url+7, MAX_URL -8 );
    strcat(url, "/");
    const auto mx = MasterXaction::MakePortless<XactionInitiator::initPeerMcast>();
    auto *req = HttpRequest::FromUrlXXX(url, mx);
    assert(req != nullptr);
    const AccessLogEntry::Pointer ale = new AccessLogEntry;
    ale->request = req;
    CodeContext::Reset(ale);
    StoreEntry *fake = storeCreateEntry(url, url, RequestFlags(), Http::METHOD_GET);
    const auto psstate = new PeerSelector(nullptr);
    psstate->request = req;
    HTTPMSGLOCK(psstate->request);
    psstate->entry = fake;
    psstate->peerCountMcastPeerXXX = cbdataReference(p);
    psstate->ping.start = current_time;
    psstate->al = ale;
    mem = fake->mem_obj;
    mem->request = psstate->request;
    mem->start_ping = current_time;
    mem->ping_reply_callback = peerCountHandleIcpReply;
    mem->ircb_data = psstate;
    mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl);
    p->mcast.id = mem->id;
    reqnum = icpSetCacheKey((const cache_key *)fake->key);
    icpCreateAndSend(ICP_QUERY, 0, url, reqnum, 0,
                     icpOutgoingConn->fd, p->in_addr, psstate->al);
    fake->ping_status = PING_WAITING; // TODO: refactor to use PeerSelector::startPingWaiting()
    eventAdd("peerCountMcastPeersDone",
             peerCountMcastPeersDone,
             psstate,
             Config.Timeout.mcast_icp_query / 1000.0, 1);
    p->mcast.flags.counting = true;
}

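/// timed event: wraps up a multicast counting transaction and destroys its
/// temporary PeerSelector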
static void
peerCountMcastPeersDone(void *data)
{
    const auto psstate = static_cast<PeerSelector*>(data);
    CallBack(psstate->al, [psstate] {
        peerCountMcastPeersAbort(psstate);
        delete psstate;
    });
}

/// ends counting of multicast ICP replies
/// to the ICP query initiated by peerCountMcastPeersCreateAndSend()
static void
peerCountMcastPeersAbort(PeerSelector * const psstate)
{
    StoreEntry *fake = psstate->entry;

    if (cbdataReferenceValid(psstate->peerCountMcastPeerXXX)) {
        CachePeer *p = (CachePeer *)psstate->peerCountMcastPeerXXX;
        p->mcast.flags.counting = false;
        p->mcast.avg_n_members = Math::doubleAverage(p->mcast.avg_n_members, (double) psstate->ping.n_recv, ++p->mcast.n_times_counted, 10);
        debugs(15, DBG_IMPORTANT, "Group " << *p << ": " << psstate->ping.n_recv <<
               " replies, " << std::setw(4) << std::setprecision(2) <<
               p->mcast.avg_n_members << " average, RTT " << p->stats.rtt);
        p->mcast.n_replies_expected = (int) p->mcast.avg_n_members;
    }

    cbdataReferenceDone(psstate->peerCountMcastPeerXXX);

    fake->abort(); // sets ENTRY_ABORTED and initiates related cleanup
    fake->mem_obj->request = nullptr;
    fake->unlock("peerCountMcastPeersDone");
}

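/// IRCB for multicast counting: tallies each ICP reply and updates the
/// responding peer's smoothed RTT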
static void
peerCountHandleIcpReply(CachePeer * p, peer_t, AnyP::ProtocolType proto, void *, void *data)
{
    const auto psstate = static_cast<PeerSelector*>(data);
    StoreEntry *fake = psstate->entry;
    assert(fake);
    MemObject *mem = fake->mem_obj;
    assert(mem);
    int rtt = tvSubMsec(mem->start_ping, current_time);
    assert(proto == AnyP::PROTO_ICP);
    ++ psstate->ping.n_recv;
    int rtt_av_factor = RTT_AV_FACTOR;

    if (p->options.weighted_roundrobin)
        rtt_av_factor = RTT_BACKGROUND_AV_FACTOR;

    p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, psstate->ping.n_recv, rtt_av_factor);
}

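/// cache manager action handler for the server_list report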
static void
neighborDumpPeers(StoreEntry * sentry)
{
    dump_peers(sentry, Config.peers);
}

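/// reports the configured cache_peer options of the given peer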
void
dump_peer_options(StoreEntry * sentry, CachePeer * p)
{
    PackableStream os(*sentry);

    if (p->options.proxy_only)
        os << " proxy-only";

    if (p->options.no_query)
        os << " no-query";

    if (p->options.background_ping)
        os << " background-ping";

    if (p->options.no_digest)
        os << " no-digest";

    if (p->options.default_parent)
        os << " default";

    if (p->options.roundrobin)
        os << " round-robin";

    if (p->options.carp)
        os << " carp";

#if USE_AUTH
    if (p->options.userhash)
        os << " userhash";
#endif

    if (p->options.sourcehash)
        os << " sourcehash";

    if (p->options.weighted_roundrobin)
        os << " weighted-round-robin";

    if (p->options.mcast_responder)
        os << " multicast-responder";

    if (p->options.mcast_siblings)
        os << " multicast-siblings";

    if (p->weight != 1)
        os << " weight=" << p->weight;

    if (p->options.closest_only)
        os << " closest-only";

#if USE_HTCP
    if (p->options.htcp) {
        os << " htcp";
        std::vector<const char *, PoolingAllocator<const char *> > opts;
        if (p->options.htcp_oldsquid)
            opts.push_back("oldsquid");
        if (p->options.htcp_no_clr)
            opts.push_back("no-clr");
        if (p->options.htcp_no_purge_clr)
            opts.push_back("no-purge-clr");
        if (p->options.htcp_only_clr)
            opts.push_back("only-clr");
        if (p->options.htcp_forward_clr)
            opts.push_back("forward-clr");
        os << AsList(opts).prefixedBy("=").delimitedBy(",");
    }
#endif

    if (p->options.no_netdb_exchange)
        os << " no-netdb-exchange";

#if USE_DELAY_POOLS
    if (p->options.no_delay)
        os << " no-delay";
#endif

    if (p->login)
        os << " login=" << p->login;

    if (p->mcast.ttl > 0)
        os << " ttl=" << p->mcast.ttl;

    if (p->connect_timeout_raw > 0)
        os << " connect-timeout=" << p->connect_timeout_raw;

    if (p->connect_fail_limit != PEER_TCP_MAGIC_COUNT)
        os << " connect-fail-limit=" << p->connect_fail_limit;

#if USE_CACHE_DIGESTS

    if (p->digest_url)
        os << " digest-url=" << p->digest_url;

#endif

    if (p->options.allow_miss)
        os << " allow-miss";

    if (p->options.no_tproxy)
        os << " no-tproxy";

    if (p->max_conn > 0)
        os << " max-conn=" << p->max_conn;

    if (p->standby.limit > 0)
        os << " standby=" << p->standby.limit;

    if (p->options.originserver)
        os << " originserver";

    if (p->domain)
        os << " forceddomain=" << p->domain;

    if (p->connection_auth == 0)
        os << " connection-auth=off";
    else if (p->connection_auth == 1)
        os << " connection-auth=on";
    else if (p->connection_auth == 2)
        os << " connection-auth=auto";

    p->secure.dumpCfg(os, "tls-");
    os << '\n';
}

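/// reports status and ICP/HTCP statistics for each configured peer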
static void
dump_peers(StoreEntry *sentry, CachePeers *peers)
{
    char ntoabuf[MAX_IPSTRLEN];
    int i;

    if (!peers) {
        storeAppendPrintf(sentry, "There are no neighbors installed.\n");
        return;
    }

    for (const auto &peer: *peers) {
        const auto e = peer.get();
        assert(e->host != nullptr);
        storeAppendPrintf(sentry, "\n%-11.11s: %s\n",
                          neighborTypeStr(e),
                          e->name);
        storeAppendPrintf(sentry, "Host       : %s/%d/%d\n",
                          e->host,
                          e->http_port,
                          e->icp.port);
        storeAppendPrintf(sentry, "Flags      :");
        dump_peer_options(sentry, e);

        for (i = 0; i < e->n_addresses; ++i) {
            storeAppendPrintf(sentry, "Address[%d] : %s\n", i,
                              e->addresses[i].toStr(ntoabuf, MAX_IPSTRLEN));
        }

        storeAppendPrintf(sentry, "Status     : %s\n",
                          neighborUp(e) ? "Up" : "Down");
        storeAppendPrintf(sentry, "FETCHES    : %d\n", e->stats.fetches);
        storeAppendPrintf(sentry, "OPEN CONNS : %d\n", e->stats.conn_open);
        storeAppendPrintf(sentry, "AVG RTT    : %d msec\n", e->stats.rtt);

        if (!e->options.no_query) {
            storeAppendPrintf(sentry, "LAST QUERY : %8d seconds ago\n",
                              (int) (squid_curtime - e->stats.last_query));

            if (e->stats.last_reply > 0)
                storeAppendPrintf(sentry, "LAST REPLY : %8d seconds ago\n",
                                  (int) (squid_curtime - e->stats.last_reply));
            else
                storeAppendPrintf(sentry, "LAST REPLY : none received\n");

            storeAppendPrintf(sentry, "PINGS SENT : %8d\n", e->stats.pings_sent);

            storeAppendPrintf(sentry, "PINGS ACKED: %8d %3d%%\n",
                              e->stats.pings_acked,
                              Math::intPercent(e->stats.pings_acked, e->stats.pings_sent));
        }

        storeAppendPrintf(sentry, "IGNORED    : %8d %3d%%\n", e->stats.ignored_replies, Math::intPercent(e->stats.ignored_replies, e->stats.pings_acked));

        if (!e->options.no_query) {
            storeAppendPrintf(sentry, "Histogram of PINGS ACKED:\n");
#if USE_HTCP

            if (e->options.htcp) {
                storeAppendPrintf(sentry, "\tMisses\t%8d %3d%%\n",
                                  e->htcp.counts[0],
                                  Math::intPercent(e->htcp.counts[0], e->stats.pings_acked));
                storeAppendPrintf(sentry, "\tHits\t%8d %3d%%\n",
                                  e->htcp.counts[1],
                                  Math::intPercent(e->htcp.counts[1], e->stats.pings_acked));
            } else {
#endif

                for (auto op : WholeEnum<icp_opcode>()) {
                    if (e->icp.counts[op] == 0)
                        continue;

                    storeAppendPrintf(sentry, " %12.12s : %8d %3d%%\n",
                                      icp_opcode_str[op],
                                      e->icp.counts[op],
                                      Math::intPercent(e->icp.counts[op], e->stats.pings_acked));
                }

#if USE_HTCP

            }

#endif

        }

        if (e->stats.last_connect_failure) {
            storeAppendPrintf(sentry, "Last failed connect() at: %s\n",
                              Time::FormatHttpd(e->stats.last_connect_failure));
        }

        storeAppendPrintf(sentry, "keep-alive ratio: %d%%\n", Math::intPercent(e->stats.n_keepalives_recv, e->stats.n_keepalives_sent));
    }
}

#if USE_HTCP
void
neighborsHtcpReply(const cache_key * key, HtcpReplyData * htcp, const Ip::Address &from)
{
    StoreEntry *e = Store::Root().findCallbackXXX(key);
    MemObject *mem = nullptr;
    CachePeer *p;
    peer_t ntype = PEER_NONE;
    debugs(15, 6, "neighborsHtcpReply: " <<
           (htcp->hit ? "HIT" : "MISS") << " " <<
           storeKeyText(key));

    if (nullptr != e)
        mem = e->mem_obj;

    if ((p = whichPeer(from)))
        neighborAliveHtcp(p, mem, htcp);

    /* Does the entry exist? */
    if (nullptr == e) {
        debugs(12, 3, "neighborsHtcpReply: Cache key '" << storeKeyText(key) << "' not found");
        neighborCountIgnored(p);
        return;
    }

    /* check if someone is already fetching it */
    if (EBIT_TEST(e->flags, ENTRY_DISPATCHED)) {
        debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched.");
        neighborCountIgnored(p);
        return;
    }

    if (mem == nullptr) {
        debugs(15, 2, "Ignoring reply for missing mem_obj: " << storeKeyText(key));
        neighborCountIgnored(p);
        return;
    }

    if (e->ping_status != PING_WAITING) {
        debugs(15, 2, "neighborsUdpAck: Entry " << storeKeyText(key) << " is not PING_WAITING");
        neighborCountIgnored(p);
        return;
    }

    if (!e->locked()) {
        // TODO: many entries are unlocked; why is this reported at level 1?
        debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks");
        neighborCountIgnored(p);
        return;
    }

    if (!mem->ircb_data) {
        debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing HTCP callback data for " << *e);
        neighborCountIgnored(p);
        return;
    }

    if (p) {
        ntype = neighborType(p, mem->request->url);
        neighborUpdateRtt(p, mem);
    }

    if (ignoreMulticastReply(p, mem->ircb_data)) {
        neighborCountIgnored(p);
        return;
    }

    debugs(15, 3, "neighborsHtcpReply: e = " << e);
    // TODO: Refactor (ping_reply_callback,ircb_data) to add CodeContext.
    mem->ping_reply_callback(p, ntype, AnyP::PROTO_HTCP, htcp, mem->ircb_data);
}

/*
 * Send HTCP CLR messages to all peers configured to receive them.
 */
void
neighborsHtcpClear(StoreEntry * e, HttpRequest * req, const HttpRequestMethod &method, htcp_clr_reason reason)
{
    char buf[128];

    for (const auto &p: CurrentCachePeers()) {
        if (!p->options.htcp) {
            continue;
        }
        if (p->options.htcp_no_clr) {
            continue;
        }
        if (p->options.htcp_no_purge_clr && reason == HTCP_CLR_PURGE) {
            continue;
        }
        debugs(15, 3, "neighborsHtcpClear: sending CLR to " << p->in_addr.toUrl(buf, 128));
        htcpClear(e, req, method, p.get(), reason);
    }
}

#endif
