| Line | Data | |
|---|---|---|
| 1 | /* | |
| 2 | * Copyright (C) 1996-2025 The Squid Software Foundation and contributors | |
| 3 | * | |
| 4 | * Squid software is distributed under GPLv2+ license and includes | |
| 5 | * contributions from numerous individuals and organizations. | |
| 6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
| 7 | */ | |
| 8 | ||
| 9 | /* DEBUG: section 15 Neighbor Routines */ | |
| 10 | ||
| 11 | #include "squid.h" | |
| 12 | #include "acl/FilledChecklist.h" | |
| 13 | #include "anyp/PortCfg.h" | |
| 14 | #include "base/EnumIterator.h" | |
| 15 | #include "base/IoManip.h" | |
| 16 | #include "base/PackableStream.h" | |
| 17 | #include "base/PrecomputedCodeContext.h" | |
| 18 | #include "CacheDigest.h" | |
| 19 | #include "CachePeer.h" | |
| 20 | #include "CachePeers.h" | |
| 21 | #include "comm/Connection.h" | |
| 22 | #include "comm/ConnOpener.h" | |
| 23 | #include "compat/netdb.h" | |
| 24 | #include "debug/Messages.h" | |
| 25 | #include "event.h" | |
| 26 | #include "FwdState.h" | |
| 27 | #include "globals.h" | |
| 28 | #include "htcp.h" | |
| 29 | #include "HttpRequest.h" | |
| 30 | #include "icmp/net_db.h" | |
| 31 | #include "ICP.h" | |
| 32 | #include "int.h" | |
| 33 | #include "ip/Address.h" | |
| 34 | #include "ip/tools.h" | |
| 35 | #include "ipcache.h" | |
| 36 | #include "MemObject.h" | |
| 37 | #include "mgr/Registration.h" | |
| 38 | #include "multicast.h" | |
| 39 | #include "neighbors.h" | |
| 40 | #include "NeighborTypeDomainList.h" | |
| 41 | #include "pconn.h" | |
| 42 | #include "PeerDigest.h" | |
| 43 | #include "PeerPoolMgr.h" | |
| 44 | #include "PeerSelectState.h" | |
| 45 | #include "RequestFlags.h" | |
| 46 | #include "SquidConfig.h" | |
| 47 | #include "SquidMath.h" | |
| 48 | #include "stat.h" | |
| 49 | #include "Store.h" | |
| 50 | #include "store_key_md5.h" | |
| 51 | #include "tools.h" | |
| 52 | ||
| 53 | /* count mcast group peers every 15 minutes */ | |
| 54 | #define MCAST_COUNT_RATE 900 | |
| 55 | ||
| 56 | bool peerAllowedToUse(const CachePeer *, PeerSelector *); | |
| 57 | static int peerWouldBePinged(const CachePeer *, PeerSelector *); | |
| 58 | static void neighborAlive(CachePeer *, const MemObject *, const icp_common_t *); | |
| 59 | #if USE_HTCP | |
| 60 | static void neighborAliveHtcp(CachePeer *, const MemObject *, const HtcpReplyData *); | |
| 61 | #endif | |
| 62 | static void neighborCountIgnored(CachePeer *); | |
| 63 | static void peerDnsRefreshCheck(void *); | |
| 64 | static void peerDnsRefreshStart(); | |
| 65 | static IPH peerDNSConfigure; | |
| 66 | static void peerProbeConnect(CachePeer *, const bool reprobeIfBusy = false); | |
| 67 | static CNCB peerProbeConnectDone; | |
| 68 | static void peerCountMcastPeersDone(void *data); | |
| 69 | static void peerCountMcastPeersStart(void *data); | |
| 70 | static void peerCountMcastPeersSchedule(CachePeer * p, time_t when); | |
| 71 | static void peerCountMcastPeersAbort(PeerSelector *); | |
| 72 | static void peerCountMcastPeersCreateAndSend(CachePeer *p); | |
| 73 | static IRCB peerCountHandleIcpReply; | |
| 74 | ||
| 75 | static void neighborIgnoreNonPeer(const Ip::Address &, icp_opcode); | |
| 76 | static OBJH neighborDumpPeers; | |
| 77 | static void dump_peers(StoreEntry *, CachePeers *); | |
| 78 | ||
| 79 | static unsigned short echo_port; | |
| 80 | ||
| 81 | static int NLateReplies = 0; | |
| 82 | ||
| 83 | const char * | |
| 84 | neighborTypeStr(const CachePeer * p) | |
| 85 | { | |
| 86 | if (p->type == PEER_NONE) | |
| 87 | return "Non-Peer"; | |
| 88 | ||
| 89 | if (p->type == PEER_SIBLING) | |
| 90 | return "Sibling"; | |
| 91 | ||
| 92 | if (p->type == PEER_MULTICAST) | |
| 93 | return "Multicast Group"; | |
| 94 | ||
| 95 | return "Parent"; | |
| 96 | } | |
| 97 | ||
| 98 | CachePeer * | |
| 99 | whichPeer(const Ip::Address &from) | |
| 100 | { | |
| 101 | int j; | |
| 102 | ||
| 103 | debugs(15, 3, "whichPeer: from " << from); | |
| 104 | ||
| 105 | for (const auto &p: CurrentCachePeers()) { | |
| 106 | for (j = 0; j < p->n_addresses; ++j) { | |
| 107 | if (from == p->addresses[j] && from.port() == p->icp.port) { | |
| 108 | return p.get(); | |
| 109 | } | |
| 110 | } | |
| 111 | } | |
| 112 | ||
| 113 | return nullptr; | |
| 114 | } | |
| 115 | ||
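| | /// Returns the effective neighbor type of this CachePeer for the given URL, | |
| | /// applying any per-domain type overrides configured in p->typelist first. | |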
| 116 | peer_t | |
| 117 | neighborType(const CachePeer * p, const AnyP::Uri &url) | |
| 118 | { | |
| 119 | ||
| 120 | const NeighborTypeDomainList *d = nullptr; | |
| 121 | ||
| 122 | for (d = p->typelist; d; d = d->next) { | |
| 123 | if (0 == matchDomainName(url.host(), d->domain)) | |
| 124 | if (d->type != PEER_NONE) | |
| 125 | return d->type; | |
| 126 | } | |
| 127 | ||
| 128 | if (p->type == PEER_MULTICAST && p->options.mcast_siblings) | |
| 129 | return PEER_SIBLING; | |
| 130 | ||
| 131 | return p->type; | |
| 132 | } | |
| 133 | ||
| 134 | /** | |
| 135 | * \return Whether it is appropriate to fetch REQUEST from PEER. | |
| 136 | */ | |
| 137 | bool | |
| 138 | peerAllowedToUse(const CachePeer * p, PeerSelector * ps) | |
| 139 | { | |
| 140 | assert(ps); | |
| 141 | HttpRequest *request = ps->request; | |
| 142 | assert(request != nullptr); | |
| 143 | ||
| 144 | if (neighborType(p, request->url) == PEER_SIBLING) { | |
| 145 | if (p->type == PEER_MULTICAST && p->options.mcast_siblings && | |
| 146 | (request->flags.noCache || request->flags.refresh || request->flags.loopDetected || request->flags.needValidation)) | |
| 147 | debugs(15, 2, "multicast-siblings optimization match for " << *p << ", " << request->url.authority()); | |
| 148 | ||
| 149 | if (request->flags.noCache) | |
| 150 | return false; | |
| 151 | ||
| 152 | if (request->flags.refresh) | |
| 153 | return false; | |
| 154 | ||
| 155 | if (request->flags.loopDetected) | |
| 156 | return false; | |
| 157 | ||
| 158 | if (request->flags.needValidation) | |
| 159 | return false; | |
| 160 | } | |
| 161 | ||
| 162 | // CONNECT requests are proxy requests and are not forwarded to origin servers, | |
| 163 | // unless the destination port matches, in which case we MAY perform a 'DIRECT' to this CachePeer. | |
| 164 | if (p->options.originserver && request->method == Http::METHOD_CONNECT && request->url.port() != p->http_port) | |
| 165 | return false; | |
| 166 | ||
| 167 | if (p->access == nullptr) | |
| 168 | return true; | |
| 169 | ||
| 170 | ACLFilledChecklist checklist(p->access, request); | |
| 171 | checklist.updateAle(ps->al); | |
| 172 | checklist.syncAle(request, nullptr); | |
| 173 | return checklist.fastCheck().allowed(); | |
| 174 | } | |
| 175 | ||
| 176 | /* Return TRUE if it is okay to send an ICP request to this CachePeer. */ | |
| 177 | static int | |
| 178 | peerWouldBePinged(const CachePeer * p, PeerSelector * ps) | |
| 179 | { | |
| 180 | assert(ps); | |
| 181 | HttpRequest *request = ps->request; | |
| 182 | ||
| 183 | if (p->icp.port == 0) | |
| 184 | return 0; | |
| 185 | ||
| 186 | if (p->options.no_query) | |
| 187 | return 0; | |
| 188 | ||
| 189 | if (p->options.mcast_responder) | |
| 190 | return 0; | |
| 191 | ||
| 192 | if (p->n_addresses == 0) | |
| 193 | return 0; | |
| 194 | ||
| 195 | if (p->options.background_ping && (squid_curtime - p->stats.last_query < Config.backgroundPingRate)) | |
| 196 | return 0; | |
| 197 | ||
| 198 | /* the case below seems strange, but can happen if the | |
| 199 | * URL host is on the other side of a firewall */ | |
| 200 | if (p->type == PEER_SIBLING) | |
| 201 | if (!request->flags.hierarchical) | |
| 202 | return 0; | |
| 203 | ||
| 204 | if (!peerAllowedToUse(p, ps)) | |
| 205 | return 0; | |
| 206 | ||
| 207 | /* Ping dead peers every timeout interval */ | |
| 208 | if (squid_curtime - p->stats.last_query > Config.Timeout.deadPeer) | |
| 209 | return 1; | |
| 210 | ||
| 211 | if (!neighborUp(p)) | |
| 212 | return 0; | |
| 213 | ||
| 214 | return 1; | |
| 215 | } | |
| 216 | ||
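| | /// Whether the peer is below its connection limit (max_conn, or Squid_MaxFD | |
| | /// when no explicit limit is configured) and may open another connection. | |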
| 217 | bool | |
| 218 | peerCanOpenMore(const CachePeer *p) | |
| 219 | { | |
| 220 | const int effectiveLimit = p->max_conn <= 0 ? Squid_MaxFD : p->max_conn; | |
| 221 | const int remaining = effectiveLimit - p->stats.conn_open; | |
| 222 | debugs(15, 7, remaining << '=' << effectiveLimit << '-' << p->stats.conn_open); | |
| 223 | return remaining > 0; | |
| 224 | } | |
| 225 | ||
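| | /// Whether an already-open connection to the peer (currently only a standby | |
| | /// connection) could serve a request without opening a new connection. | |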
| 226 | bool | |
| 227 | peerHasConnAvailable(const CachePeer *p) | |
| 228 | { | |
| 229 | // Standby connections can be used without opening new connections. | |
| 230 | const int standbys = p->standby.pool ? p->standby.pool->count() : 0; | |
| 231 | ||
| 232 | // XXX: Some idle pconns can be used without opening new connections. | |
| 233 | // Complication: Idle pconns cannot be reused for some requests. | |
| 234 | const int usableIdles = 0; | |
| 235 | ||
| 236 | const int available = standbys + usableIdles; | |
| 237 | debugs(15, 7, available << '=' << standbys << '+' << usableIdles); | |
| 238 | return available > 0; | |
| 239 | } | |
| 240 | ||
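| | /// Notes that a connection to the peer has closed, and wakes the standby | |
| | /// pool manager if it was waiting for connection capacity to free up. | |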
| 241 | void | |
| 242 | peerConnClosed(CachePeer *p) | |
| 243 | { | |
| 244 | --p->stats.conn_open; | |
| 245 | if (p->standby.waitingForClose && peerCanOpenMore(p)) { | |
| 246 | p->standby.waitingForClose = false; | |
| 247 | PeerPoolMgr::Checkpoint(p->standby.mgr, "conn closed"); | |
| 248 | } | |
| 249 | } | |
| 250 | ||
| 251 | /* Return TRUE if it is okay to send an HTTP request to this CachePeer. */ | |
| 252 | int | |
| 253 | peerHTTPOkay(const CachePeer * p, PeerSelector * ps) | |
| 254 | { | |
| 255 | if (!peerCanOpenMore(p) && !peerHasConnAvailable(p)) | |
| 256 | return 0; | |
| 257 | ||
| 258 | if (!peerAllowedToUse(p, ps)) | |
| 259 | return 0; | |
| 260 | ||
| 261 | if (!neighborUp(p)) | |
| 262 | return 0; | |
| 263 | ||
| 264 | return 1; | |
| 265 | } | |
| 266 | ||
| 267 | int | |
| 268 | neighborsCount(PeerSelector *ps) | |
| 269 | { | |
| 270 | int count = 0; | |
| 271 | ||
| 272 | for (const auto &p: CurrentCachePeers()) | |
| 273 | if (peerWouldBePinged(p.get(), ps)) | |
| 274 | ++count; | |
| 275 | ||
| 276 | debugs(15, 3, "neighborsCount: " << count); | |
| 277 | ||
| 278 | return count; | |
| 279 | } | |
| 280 | ||
| 281 | CachePeer * | |
| 282 | getFirstUpParent(PeerSelector *ps) | |
| 283 | { | |
| 284 | assert(ps); | |
| 285 | HttpRequest *request = ps->request; | |
| 286 | ||
| 287 | for (const auto &peer: CurrentCachePeers()) { | |
| 288 | const auto p = peer.get(); | |
| 289 | ||
| 290 | if (!neighborUp(p)) | |
| 291 | continue; | |
| 292 | ||
| 293 | if (neighborType(p, request->url) != PEER_PARENT) | |
| 294 | continue; | |
| 295 | ||
| 296 | if (!peerHTTPOkay(p, ps)) | |
| 297 | continue; | |
| 298 | ||
| 299 | debugs(15, 3, "returning " << *p); | |
| 300 | return p; | |
| 301 | } | |
| 302 | ||
| 303 | debugs(15, 3, "none found"); | |
| 304 | return nullptr; | |
| 305 | } | |
| 306 | ||
| 307 | CachePeer * | |
| 308 | getRoundRobinParent(PeerSelector *ps) | |
| 309 | { | |
| 310 | assert(ps); | |
| 311 | HttpRequest *request = ps->request; | |
| 312 | ||
| 313 | CachePeer *q = nullptr; | |
| 314 | ||
| 315 | for (const auto &peer: CurrentCachePeers()) { | |
| 316 | const auto p = peer.get(); | |
| 317 | if (!p->options.roundrobin) | |
| 318 | continue; | |
| 319 | ||
| 320 | if (neighborType(p, request->url) != PEER_PARENT) | |
| 321 | continue; | |
| 322 | ||
| 323 | if (!peerHTTPOkay(p, ps)) | |
| 324 | continue; | |
| 325 | ||
| 326 | if (p->weight == 0) | |
| 327 | continue; | |
| 328 | ||
| 329 | if (q) { | |
| 330 | if (p->weight == q->weight) { | |
| 331 | if (q->rr_count < p->rr_count) | |
| 332 | continue; | |
| 333 | } else if ( ((double) q->rr_count / q->weight) < ((double) p->rr_count / p->weight)) { | |
| 334 | continue; | |
| 335 | } | |
| 336 | } | |
| 337 | ||
| 338 | q = p; | |
| 339 | } | |
| 340 | ||
| 341 | if (q) | |
| 342 | ++ q->rr_count; | |
| 343 | ||
| 344 | debugs(15, 3, "returning " << RawPointer(q).orNil()); | |
| 345 | ||
| 346 | return q; | |
| 347 | } | |
| 348 | ||
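| | /// Selects the weighted-round-robin parent with the lowest rr_count. The | |
| | /// winner's rr_count then grows by its weight-adjusted RTT, so faster and | |
| | /// higher-weight parents are chosen more frequently over time. | |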
| 349 | CachePeer * | |
| 350 | getWeightedRoundRobinParent(PeerSelector *ps) | |
| 351 | { | |
| 352 | assert(ps); | |
| 353 | HttpRequest *request = ps->request; | |
| 354 | ||
| 355 | CachePeer *q = nullptr; | |
| 356 | int weighted_rtt; | |
| 357 | ||
| 358 | for (const auto &peer: CurrentCachePeers()) { | |
| 359 | const auto p = peer.get(); | |
| 360 | ||
| 361 | if (!p->options.weighted_roundrobin) | |
| 362 | continue; | |
| 363 | ||
| 364 | if (neighborType(p, request->url) != PEER_PARENT) | |
| 365 | continue; | |
| 366 | ||
| 367 | if (!peerHTTPOkay(p, ps)) | |
| 368 | continue; | |
| 369 | ||
| 370 | if (q && q->rr_count < p->rr_count) | |
| 371 | continue; | |
| 372 | ||
| 373 | q = p; | |
| 374 | } | |
| 375 | ||
| 376 | if (q && q->rr_count > 1000000) | |
| 377 | for (const auto &p: CurrentCachePeers()) { | |
| 378 | if (!p->options.weighted_roundrobin) | |
| 379 | continue; | |
| 380 | ||
| 381 | if (neighborType(p.get(), request->url) != PEER_PARENT) | |
| 382 | continue; | |
| 383 | ||
| 384 | p->rr_count = 0; | |
| 385 | } | |
| 386 | ||
| 387 | if (q) { | |
| 388 | weighted_rtt = (q->stats.rtt - q->basetime) / q->weight; | |
| 389 | ||
| 390 | if (weighted_rtt < 1) | |
| 391 | weighted_rtt = 1; | |
| 392 | ||
| 393 | q->rr_count += weighted_rtt; | |
| 394 | ||
| 395 | debugs(15, 3, "getWeightedRoundRobinParent: weighted_rtt " << weighted_rtt); | |
| 396 | } | |
| 397 | ||
| 398 | debugs(15, 3, "returning " << RawPointer(q).orNil()); | |
| 399 | return q; | |
| 400 | } | |
| 401 | ||
| 402 | /** | |
| 403 | * This gets called every 5 minutes to clear the round-robin counter. | |
| 404 | * The exact timing is an arbitrary default, chosen to cover a large | |
| 405 | * number of requests in a high-performance environment during that | |
| 406 | * period. The more requests served between counter resets, the more | |
| 407 | * balanced the resulting load distribution. | |
| 408 | * | |
| 409 | * \param data unused | |
| 410 | * | |
| 411 | * TODO: Make the reset timing a selectable parameter in squid.conf | |
| 412 | */ | |
| 413 | static void | |
| 414 | peerClearRRLoop(void *data) | |
| 415 | { | |
| 416 | peerClearRR(); | |
| 417 | eventAdd("peerClearRR", peerClearRRLoop, data, 5 * 60.0, 0); | |
| 418 | } | |
| 419 | ||
| 420 | /** | |
| 421 | * This gets called on startup and restart to kick off the CachePeer round-robin | |
| 422 | * maintenance event. It ensures that no matter how many times it is called, | |
| 423 | * no more than one event is scheduled. | |
| 424 | */ | |
| 425 | void | |
| 426 | peerClearRRStart(void) | |
| 427 | { | |
| 428 | static bool event_added = false; | |
| 429 | if (!event_added) { | |
| 430 | peerClearRRLoop(nullptr); | |
| 431 | event_added=true; | |
| 432 | } | |
| 433 | } | |
| 434 | ||
| 435 | /** | |
| 436 | * Called whenever the round-robin counters need to be reset to a sane state. | |
| 437 | * So far those times are: | |
| 438 | * - On startup and reconfigure - to set the counters to sane initial settings. | |
| 439 | * - When a CachePeer has revived from being dead, to prevent the revived CachePeer | |
| 440 | * from being flooded with requests which it 'missed' during the down period. | |
| 441 | */ | |
| 442 | void | |
| 443 | peerClearRR() | |
| 444 | { | |
| 445 | for (const auto &p: CurrentCachePeers()) | |
| 446 | p->rr_count = 1; | |
| 447 | } | |
| 448 | ||
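| | /// Records a successful reply or probe from the peer: logs revival of a dead | |
| | /// peer, resets round-robin counters, and refreshes reply/probe timestamps. | |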
| 449 | void | |
| 450 | peerAlive(CachePeer *p) | |
| 451 | { | |
| 452 | if (p->stats.logged_state == PEER_DEAD && p->tcp_up) { | |
| 453 | debugs(15, DBG_IMPORTANT, "Detected REVIVED " << neighborTypeStr(p) << ": " << *p); | |
| 454 | p->stats.logged_state = PEER_ALIVE; | |
| 455 | peerClearRR(); | |
| 456 | if (p->standby.mgr.valid()) | |
| 457 | PeerPoolMgr::Checkpoint(p->standby.mgr, "revived peer"); | |
| 458 | } | |
| 459 | ||
| 460 | p->stats.last_reply = squid_curtime; | |
| 461 | p->stats.probe_start = 0; | |
| 462 | ||
| 463 | // TODO: Remove or explain how we could detect an alive peer without IP addresses | |
| 464 | if (!p->n_addresses) | |
| 465 | ipcache_nbgethostbyname(p->host, peerDNSConfigure, p); | |
| 466 | } | |
| 467 | ||
| 468 | CachePeer * | |
| 469 | getDefaultParent(PeerSelector *ps) | |
| 470 | { | |
| 471 | assert(ps); | |
| 472 | HttpRequest *request = ps->request; | |
| 473 | ||
| 474 | for (const auto &peer: CurrentCachePeers()) { | |
| 475 | const auto p = peer.get(); | |
| 476 | ||
| 477 | if (neighborType(p, request->url) != PEER_PARENT) | |
| 478 | continue; | |
| 479 | ||
| 480 | if (!p->options.default_parent) | |
| 481 | continue; | |
| 482 | ||
| 483 | if (!peerHTTPOkay(p, ps)) | |
| 484 | continue; | |
| 485 | ||
| 486 | debugs(15, 3, "returning " << *p); | |
| 487 | ||
| 488 | return p; | |
| 489 | } | |
| 490 | ||
| 491 | // TODO: Refactor similar get*() functions to use our return/reporting style | |
| 492 | debugs(15, 3, "none found"); | |
| 493 | return nullptr; | |
| 494 | } | |
| 495 | ||
| 496 | static void | |
| 497 | neighborsRegisterWithCacheManager() | |
| 498 | { | |
| 499 | Mgr::RegisterAction("server_list", | |
| 500 | "Peer Cache Statistics", | |
| 501 | neighborDumpPeers, 0, 1); | |
| 502 | } | |
| 503 | ||
| 504 | void | |
| 505 | neighbors_init(void) | |
| 506 | { | |
| 507 | const char *me = getMyHostname(); | |
| 508 | ||
| 509 | neighborsRegisterWithCacheManager(); | |
| 510 | ||
| 511 | if (Comm::IsConnOpen(icpIncomingConn)) { | |
| 512 | RawCachePeers peersToRemove; | |
| 513 | ||
| 514 | for (const auto &thisPeer: CurrentCachePeers()) { | |
| 515 | if (0 != strcmp(thisPeer->host, me)) | |
| 516 | continue; | |
| 517 | ||
| 518 | for (AnyP::PortCfgPointer s = HttpPortList; s != nullptr; s = s->next) { | |
| 519 | if (thisPeer->http_port != s->s.port()) | |
| 520 | continue; | |
| 521 | ||
| 522 | debugs(15, DBG_IMPORTANT, "WARNING: Peer looks like this host." << | |
| 523 | Debug::Extra << "Ignoring cache_peer " << *thisPeer); | |
| 524 | ||
| 525 | peersToRemove.push_back(thisPeer.get()); | |
| 526 | break; // avoid warning about (and removing) the same CachePeer twice | |
| 527 | } | |
| 528 | } | |
| 529 | ||
| 530 | while (peersToRemove.size()) { | |
| 531 | const auto p = peersToRemove.back(); | |
| 532 | peersToRemove.pop_back(); | |
| 533 | DeleteConfigured(p); | |
| 534 | } | |
| 535 | } | |
| 536 | ||
| 537 | peerDnsRefreshStart(); | |
| 538 | ||
| 539 | const auto sep = xgetservbyname("echo", "udp"); | |
| 540 | echo_port = sep ? ntohs((unsigned short) sep->s_port) : 7; | |
| 541 | } | |
| 542 | ||
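| | /// Sends an ICP (or HTCP) query about this request to every eligible peer. | |
| | /// Returns the number of peers queried; sets *exprep to the number of replies | |
| | /// expected and *timeout to the query timeout in milliseconds. | |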
| 543 | int | |
| 544 | neighborsUdpPing(HttpRequest * request, | |
| 545 | StoreEntry * entry, | |
| 546 | IRCB * callback, | |
| 547 | PeerSelector *ps, | |
| 548 | int *exprep, | |
| 549 | int *timeout) | |
| 550 | { | |
| 551 | const char *url = entry->url(); | |
| 552 | MemObject *mem = entry->mem_obj; | |
| 553 | int reqnum = 0; | |
| 554 | int flags; | |
| 555 | int peers_pinged = 0; | |
| 556 | int parent_timeout = 0, parent_exprep = 0; | |
| 557 | int sibling_timeout = 0, sibling_exprep = 0; | |
| 558 | int mcast_timeout = 0, mcast_exprep = 0; | |
| 559 | ||
| 560 | if (Config.peers == nullptr) | |
| 561 | return 0; | |
| 562 | ||
| 563 | assert(!entry->hasDisk()); | |
| 564 | ||
| 565 | mem->start_ping = current_time; | |
| 566 | ||
| 567 | mem->ping_reply_callback = callback; | |
| 568 | ||
| 569 | mem->ircb_data = ps; | |
| 570 | ||
| 571 | reqnum = icpSetCacheKey((const cache_key *)entry->key); | |
| 572 | ||
| 573 | const auto savedContext = CodeContext::Current(); | |
| 574 | for (size_t i = 0; i < Config.peers->size(); ++i) { | |
| 575 | const auto p = &Config.peers->nextPeerToPing(i); | |
| 576 | ||
| 577 | CodeContext::Reset(p->probeCodeContext); | |
| 578 | ||
| 579 | debugs(15, 5, "candidate: " << *p); | |
| 580 | ||
| 581 | if (!peerWouldBePinged(p, ps)) | |
| 582 | continue; /* next CachePeer */ | |
| 583 | ||
| 584 | ++peers_pinged; | |
| 585 | ||
| 586 | debugs(15, 4, "pinging cache_peer " << *p << " for '" << url << "'"); | |
| 587 | ||
| 588 | debugs(15, 3, "neighborsUdpPing: key = '" << entry->getMD5Text() << "'"); | |
| 589 | ||
| 590 | debugs(15, 3, "neighborsUdpPing: reqnum = " << reqnum); | |
| 591 | ||
| 592 | #if USE_HTCP | |
| 593 | if (p->options.htcp && !p->options.htcp_only_clr) { | |
| 594 | if (Config.Port.htcp <= 0) { | |
| 595 | debugs(15, DBG_CRITICAL, "ERROR: HTCP is disabled! Cannot send HTCP request to peer."); | |
| 596 | continue; | |
| 597 | } | |
| 598 | ||
| 599 | debugs(15, 3, "neighborsUdpPing: sending HTCP query"); | |
| 600 | if (htcpQuery(entry, request, p) <= 0) | |
| 601 | continue; // unable to send. | |
| 602 | } else | |
| 603 | #endif | |
| 604 | { | |
| 605 | if (Config.Port.icp <= 0 || !Comm::IsConnOpen(icpOutgoingConn)) { | |
| 606 | debugs(15, DBG_CRITICAL, "ERROR: ICP is disabled! Cannot send ICP request to peer."); | |
| 607 | continue; | |
| 608 | } else { | |
| 609 | ||
| 610 | if (p->type == PEER_MULTICAST) | |
| 611 | mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl); | |
| 612 | ||
| 613 | if (p->icp.port == echo_port) { | |
| 614 | debugs(15, 4, "neighborsUdpPing: Looks like a dumb cache, send DECHO ping"); | |
| 615 | // TODO: Get ALE from callback_data if possible. | |
| 616 | icpCreateAndSend(ICP_DECHO, 0, url, reqnum, 0, | |
| 617 | icpOutgoingConn->fd, p->in_addr, nullptr); | |
| 618 | } else { | |
| 619 | flags = 0; | |
| 620 | ||
| 621 | if (Config.onoff.query_icmp) | |
| 622 | if (p->icp.version == ICP_VERSION_2) | |
| 623 | flags |= ICP_FLAG_SRC_RTT; | |
| 624 | ||
| 625 | // TODO: Get ALE from callback_data if possible. | |
| 626 | icpCreateAndSend(ICP_QUERY, flags, url, reqnum, 0, | |
| 627 | icpOutgoingConn->fd, p->in_addr, nullptr); | |
| 628 | } | |
| 629 | } | |
| 630 | } | |
| 631 | ||
| 632 | ++ p->stats.pings_sent; | |
| 633 | ||
| 634 | if (p->type == PEER_MULTICAST) { | |
| 635 | mcast_exprep += p->mcast.n_replies_expected; | |
| 636 | mcast_timeout += (p->stats.rtt * p->mcast.n_replies_expected); | |
| 637 | } else if (neighborUp(p)) { | |
| 638 | /* it's alive, expect a reply from it */ | |
| 639 | ||
| 640 | if (neighborType(p, request->url) == PEER_PARENT) { | |
| 641 | ++parent_exprep; | |
| 642 | parent_timeout += p->stats.rtt; | |
| 643 | } else { | |
| 644 | ++sibling_exprep; | |
| 645 | sibling_timeout += p->stats.rtt; | |
| 646 | } | |
| 647 | } else { | |
| 648 | /* Neighbor is dead; ping it anyway, but don't expect a reply */ | |
| 649 | /* log it once at the threshold */ | |
| 650 | ||
| 651 | if (p->stats.logged_state == PEER_ALIVE) { | |
| 652 | debugs(15, DBG_IMPORTANT, "Detected DEAD " << neighborTypeStr(p) << ": " << *p); | |
| 653 | p->stats.logged_state = PEER_DEAD; | |
| 654 | } | |
| 655 | } | |
| 656 | ||
| 657 | p->stats.last_query = squid_curtime; | |
| 658 | ||
| 659 | /* | |
| 660 | * keep probe_start == 0 for a multicast CachePeer, | |
| 661 | * so neighborUp() never says this CachePeer is dead. | |
| 662 | */ | |
| 663 | ||
| 664 | if ((p->type != PEER_MULTICAST) && (p->stats.probe_start == 0)) | |
| 665 | p->stats.probe_start = squid_curtime; | |
| 666 | } | |
| 667 | CodeContext::Reset(savedContext); | |
| 668 | ||
| 669 | /* | |
| 670 | * How many replies to expect? | |
| 671 | */ | |
| 672 | *exprep = parent_exprep + sibling_exprep + mcast_exprep; | |
| 673 | ||
| 674 | /* | |
| 675 | * If there is a configured timeout, use it | |
| 676 | */ | |
| 677 | if (Config.Timeout.icp_query) | |
| 678 | *timeout = Config.Timeout.icp_query; | |
| 679 | else { | |
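| | /* No configured timeout: derive one from twice the average RTT | |
| | * of the peers we expect to hear from. */ | |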
| 680 | if (*exprep > 0) { | |
| 681 | if (parent_exprep) | |
| 682 | *timeout = 2 * parent_timeout / parent_exprep; | |
| 683 | else if (mcast_exprep) | |
| 684 | *timeout = 2 * mcast_timeout / mcast_exprep; | |
| 685 | else | |
| 686 | *timeout = 2 * sibling_timeout / sibling_exprep; | |
| 687 | } else | |
| 688 | *timeout = 2000; /* 2 seconds */ | |
| 689 | ||
| 690 | if (Config.Timeout.icp_query_max) | |
| 691 | if (*timeout > Config.Timeout.icp_query_max) | |
| 692 | *timeout = Config.Timeout.icp_query_max; | |
| 693 | ||
| 694 | if (*timeout < Config.Timeout.icp_query_min) | |
| 695 | *timeout = Config.Timeout.icp_query_min; | |
| 696 | } | |
| 697 | ||
| 698 | return peers_pinged; | |
| 699 | } | |
| 700 | ||
| 701 | /* lookup the digest of a given CachePeer */ | |
| 702 | lookup_t | |
| 703 | peerDigestLookup(CachePeer * p, PeerSelector * ps) | |
| 704 | { | |
| 705 | #if USE_CACHE_DIGESTS | |
| 706 | assert(ps); | |
| 707 | HttpRequest *request = ps->request; | |
| 708 | assert(request); | |
| 709 | ||
| 710 | assert(p); | |
| 711 | debugs(15, 5, "cache_peer " << *p); | |
| 712 | /* does the peer have a valid digest? */ | |
| 713 | ||
| 714 | if (!p->digest) { | |
| 715 | debugs(15, 5, "peerDigestLookup: gone!"); | |
| 716 | return LOOKUP_NONE; | |
| 717 | } else if (!peerHTTPOkay(p, ps)) { | |
| 718 | debugs(15, 5, "peerDigestLookup: !peerHTTPOkay"); | |
| 719 | return LOOKUP_NONE; | |
| 720 | } else if (!p->digest->flags.needed) { | |
| 721 | debugs(15, 5, "peerDigestLookup: note need"); | |
| 722 | peerDigestNeeded(p->digest); | |
| 723 | return LOOKUP_NONE; | |
| 724 | } else if (!p->digest->flags.usable) { | |
| 725 | debugs(15, 5, "peerDigestLookup: !ready && " << (p->digest->flags.requested ? "" : "!") << "requested"); | |
| 726 | return LOOKUP_NONE; | |
| 727 | } | |
| 728 | ||
| 729 | debugs(15, 5, "OK to lookup cache_peer " << *p); | |
| 730 | assert(p->digest->cd); | |
| 731 | /* does digest predict a hit? */ | |
| 732 | ||
| 733 | if (!p->digest->cd->contains(storeKeyPublicByRequest(request))) | |
| 734 | return LOOKUP_MISS; | |
| 735 | ||
| 736 | debugs(15, 5, "HIT for cache_peer " << *p); | |
| 737 | ||
| 738 | return LOOKUP_HIT; | |
| 739 | #else | |
| 740 | (void)p; | |
| 741 | (void)ps; | |
| 742 | #endif | |
| 743 | ||
| 744 | return LOOKUP_NONE; | |
| 745 | } | |
| 746 | ||
| 747 | /* select best CachePeer based on cache digests */ | |
| 748 | CachePeer * | |
| 749 | neighborsDigestSelect(PeerSelector *ps) | |
| 750 | { | |
| 751 | CachePeer *best_p = nullptr; | |
| 752 | #if USE_CACHE_DIGESTS | |
| 753 | assert(ps); | |
| 754 | HttpRequest *request = ps->request; | |
| 755 | ||
| 756 | int best_rtt = 0; | |
| 757 | int choice_count = 0; | |
| 758 | int ichoice_count = 0; | |
| 759 | int p_rtt; | |
| 760 | ||
| 761 | if (!Config.peers) | |
| 762 | return nullptr; | |
| 763 | ||
| 764 | if (!request->flags.hierarchical) | |
| 765 | return nullptr; | |
| 766 | ||
| 767 | storeKeyPublicByRequest(request); | |
| 768 | ||
| 769 | for (size_t i = 0; i < Config.peers->size(); ++i) { | |
| 770 | const auto p = &Config.peers->nextPeerToPing(i); | |
| 771 | ||
| 772 | const auto lookup = peerDigestLookup(p, ps); | |
| 773 | ||
| 774 | if (lookup == LOOKUP_NONE) | |
| 775 | continue; | |
| 776 | ||
| 777 | ++choice_count; | |
| 778 | ||
| 779 | if (lookup == LOOKUP_MISS) | |
| 780 | continue; | |
| 781 | ||
| 782 | p_rtt = netdbHostRtt(p->host); | |
| 783 | ||
| 784 | debugs(15, 5, "cache_peer " << *p << " rtt: " << p_rtt); | |
| 785 | ||
| 786 | /* is this CachePeer better than others in terms of rtt ? */ | |
| 787 | if (!best_p || (p_rtt && p_rtt < best_rtt)) { | |
| 788 | best_p = p; | |
| 789 | best_rtt = p_rtt; | |
| 790 | ||
| 791 | if (p_rtt) /* informative choice (aka educated guess) */ | |
| 792 | ++ichoice_count; | |
| 793 | ||
| 794 | debugs(15, 4, "cache_peer " << *p << " leads with rtt " << best_rtt); | |
| 795 | } | |
| 796 | } | |
| 797 | ||
| 798 | debugs(15, 4, "neighborsDigestSelect: choices: " << choice_count << " (" << ichoice_count << ")"); | |
| 799 | peerNoteDigestLookup(request, best_p, | |
| 800 | best_p ? LOOKUP_HIT : (choice_count ? LOOKUP_MISS : LOOKUP_NONE)); | |
| 801 | request->hier.n_choices = choice_count; | |
| 802 | request->hier.n_ichoices = ichoice_count; | |
| 803 | #else | |
| 804 | (void)ps; | |
| 805 | #endif | |
| 806 | ||
| 807 | return best_p; | |
| 808 | } | |
| 809 | ||
| 810 | void | |
| 811 | peerNoteDigestLookup(HttpRequest * request, CachePeer * p, lookup_t lookup) | |
| 812 | { | |
| 813 | #if USE_CACHE_DIGESTS | |
| 814 | if (p) | |
| 815 | strncpy(request->hier.cd_host, p->host, sizeof(request->hier.cd_host)-1); | |
| 816 | else | |
| 817 | *request->hier.cd_host = '\0'; | |
| 818 | ||
| 819 | request->hier.cd_lookup = lookup; | |
| 820 | debugs(15, 4, "cache_peer " << RawPointer(p).orNil() << ", lookup: " << lookup_t_str[lookup]); | |
| 821 | #else | |
| 822 | (void)request; | |
| 823 | (void)p; | |
| 824 | (void)lookup; | |
| 825 | #endif | |
| 826 | } | |
| 827 | ||
| 828 | static void | |
| 829 | neighborAlive(CachePeer * p, const MemObject *, const icp_common_t * header) | |
| 830 | { | |
| 831 | peerAlive(p); | |
| 832 | ++ p->stats.pings_acked; | |
| 833 | ||
| 834 | if ((icp_opcode) header->opcode <= ICP_END) | |
| 835 | ++ p->icp.counts[header->opcode]; | |
| 836 | ||
| 837 | p->icp.version = (int) header->version; | |
| 838 | } | |
| 839 | ||
| 840 | static void | |
| 841 | neighborUpdateRtt(CachePeer * p, MemObject * mem) | |
| 842 | { | |
| 843 | int rtt, rtt_av_factor; | |
| 844 | ||
| 845 | if (!mem) | |
| 846 | return; | |
| 847 | ||
| 848 | if (!mem->start_ping.tv_sec) | |
| 849 | return; | |
| 850 | ||
| 851 | rtt = tvSubMsec(mem->start_ping, current_time); | |
| 852 | ||
| 853 | if (rtt < 1 || rtt > 10000) | |
| 854 | return; | |
| 855 | ||
| 856 | rtt_av_factor = RTT_AV_FACTOR; | |
| 857 | ||
| 858 | if (p->options.weighted_roundrobin) | |
| 859 | rtt_av_factor = RTT_BACKGROUND_AV_FACTOR; | |
| 860 | ||
| 861 | p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, p->stats.pings_acked, rtt_av_factor); | |
| 862 | } | |
| 863 | ||
| 864 | #if USE_HTCP | |
| 865 | static void | |
| 866 | neighborAliveHtcp(CachePeer * p, const MemObject *, const HtcpReplyData * htcp) | |
| 867 | { | |
| 868 | peerAlive(p); | |
| 869 | ++ p->stats.pings_acked; | |
| 870 | ++ p->htcp.counts[htcp->hit ? 1 : 0]; | |
| 871 | p->htcp.version = htcp->version; | |
| 872 | } | |
| 873 | ||
| 874 | #endif | |
| 875 | ||
| 876 | static void | |
| 877 | neighborCountIgnored(CachePeer * p) | |
| 878 | { | |
| 879 | if (p == nullptr) | |
| 880 | return; | |
| 881 | ||
| 882 | ++ p->stats.ignored_replies; | |
| 883 | ||
| 884 | ++NLateReplies; | |
| 885 | } | |
| 886 | ||
| 887 | static void | |
| 888 | neighborIgnoreNonPeer(const Ip::Address &from, icp_opcode opcode) | |
| 889 | { | |
| 890 | static uint64_t ignoredReplies = 0; | |
| 891 | if (isPowTen(++ignoredReplies)) { | |
| 892 | debugs(15, DBG_IMPORTANT, "WARNING: Ignored " << ignoredReplies << " ICP replies from non-peers" << | |
| 893 | Debug::Extra << "last seen non-peer source address: " << from << | |
| 894 | Debug::Extra << "last seen ICP reply opcode: " << icp_opcode_str[opcode]); | |
| 895 | } | |
| 896 | } | |
| 897 | ||
| 898 | /* ignoreMulticastReply | |
| 899 | * | |
| 900 | * We want to ignore replies from multicast peers if the | |
| 901 | * cache_host_domain rules would normally prevent the CachePeer | |
| 902 | * from being used. | |
| 903 | */ | |
| 904 | static int | |
| 905 | ignoreMulticastReply(CachePeer * p, PeerSelector * ps) | |
| 906 | { | |
| 907 | if (p == nullptr) | |
| 908 | return 0; | |
| 909 | ||
| 910 | if (!p->options.mcast_responder) | |
| 911 | return 0; | |
| 912 | ||
| 913 | if (peerHTTPOkay(p, ps)) | |
| 914 | return 0; | |
| 915 | ||
| 916 | return 1; | |
| 917 | } | |
| 918 | ||
| 919 | /** | |
| 920 | * These records should be attached to the entry. We take the first | |
| 921 | * hit we get, or wait until everyone misses. The timeout handler | |
| 922 | * call needs to cancel this pending list or call one of the misses. | |
| 923 | * | |
| 924 | * If a hit process is already started, then so be it. | |
| 925 | */ | |
| 926 | void | |
| 927 | neighborsUdpAck(const cache_key * key, icp_common_t * header, const Ip::Address &from) | |
| 928 | { | |
| 929 | CachePeer *p = nullptr; | |
| 930 | StoreEntry *entry; | |
| 931 | MemObject *mem = nullptr; | |
| 932 | peer_t ntype = PEER_NONE; | |
| 933 | icp_opcode opcode = (icp_opcode) header->opcode; | |
| 934 | ||
| 935 | debugs(15, 6, "neighborsUdpAck: opcode " << opcode << " '" << storeKeyText(key) << "'"); | |
| 936 | ||
| 937 | if ((entry = Store::Root().findCallbackXXX(key))) | |
| 938 | mem = entry->mem_obj; | |
| 939 | ||
| 940 | if ((p = whichPeer(from))) | |
| 941 | neighborAlive(p, mem, header); | |
| 942 | ||
| 943 | if (opcode > ICP_END) | |
| 944 | return; | |
| 945 | ||
| 946 | const char *opcode_d = icp_opcode_str[opcode]; | |
| 947 | ||
| 948 | if (p) | |
| 949 | neighborUpdateRtt(p, mem); | |
| 950 | ||
| 951 | /* Does the entry exist? */ | |
| 952 | if (nullptr == entry) { | |
| 953 | debugs(12, 3, "neighborsUdpAck: Cache key '" << storeKeyText(key) << "' not found"); | |
| 954 | neighborCountIgnored(p); | |
| 955 | return; | |
| 956 | } | |
| 957 | ||
| 958 | /* check if someone is already fetching it */ | |
| 959 | if (EBIT_TEST(entry->flags, ENTRY_DISPATCHED)) { | |
| 960 | debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched."); | |
| 961 | neighborCountIgnored(p); | |
| 962 | return; | |
| 963 | } | |
| 964 | ||
| 965 | if (mem == nullptr) { | |
| 966 | debugs(15, 2, "Ignoring " << opcode_d << " for missing mem_obj: " << storeKeyText(key)); | |
| 967 | neighborCountIgnored(p); | |
| 968 | return; | |
| 969 | } | |
| 970 | ||
| 971 | if (entry->ping_status != PING_WAITING) { | |
| 972 | debugs(15, 2, "neighborsUdpAck: Late " << opcode_d << " for " << storeKeyText(key)); | |
| 973 | neighborCountIgnored(p); | |
| 974 | return; | |
| 975 | } | |
| 976 | ||
| 977 | if (!entry->locked()) { | |
| 978 | // TODO: many entries are unlocked; why is this reported at level 1? | |
| 979 | debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks"); | |
| 980 | neighborCountIgnored(p); | |
| 981 | return; | |
| 982 | } | |
| 983 | ||
| 984 | if (!mem->ircb_data) { | |
| 985 | debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing ICP callback data for " << *entry); | |
| 986 | neighborCountIgnored(p); | |
| 987 | return; | |
| 988 | } | |
| 989 | ||
| 990 | debugs(15, 3, opcode_d << " for " << storeKeyText(key) << " from " << RawPointer(p).orNil("source")); | |
| 991 | ||
| 992 | if (p) { | |
| 993 | ntype = neighborType(p, mem->request->url); | |
| 994 | } | |
| 995 | ||
| 996 | if (ignoreMulticastReply(p, mem->ircb_data)) { | |
| 997 | neighborCountIgnored(p); | |
| 998 | } else if (opcode == ICP_MISS) { | |
| 999 | if (p == nullptr) { | |
| 1000 | neighborIgnoreNonPeer(from, opcode); | |
| 1001 | } else { | |
| 1002 | mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data); | |
| 1003 | } | |
| 1004 | } else if (opcode == ICP_HIT) { | |
| 1005 | if (p == nullptr) { | |
| 1006 | neighborIgnoreNonPeer(from, opcode); | |
| 1007 | } else { | |
| 1008 | header->opcode = ICP_HIT; | |
| 1009 | mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data); | |
| 1010 | } | |
| 1011 | } else if (opcode == ICP_DECHO) { | |
| 1012 | if (p == nullptr) { | |
| 1013 | neighborIgnoreNonPeer(from, opcode); | |
| 1014 | } else if (ntype == PEER_SIBLING) { | |
| 1015 | debug_trap("neighborsUdpAck: Found non-ICP cache as SIBLING\n"); | |
| 1016 | debug_trap("neighborsUdpAck: non-ICP neighbors must be a PARENT\n"); | |
| 1017 | } else { | |
| 1018 | mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data); | |
| 1019 | } | |
| 1020 | } else if (opcode == ICP_SECHO) { | |
| 1021 | if (p) { | |
| 1022 | debugs(15, DBG_IMPORTANT, "Ignoring SECHO from neighbor " << *p); | |
| 1023 | neighborCountIgnored(p); | |
| 1024 | } else { | |
| 1025 | debugs(15, DBG_IMPORTANT, "Unsolicited SECHO from " << from); | |
| 1026 | } | |
| 1027 | } else if (opcode == ICP_DENIED) { | |
| 1028 | if (p == nullptr) { | |
| 1029 | neighborIgnoreNonPeer(from, opcode); | |
| 1030 | } else if (p->stats.pings_acked > 100) { | |
| 1031 | if (100 * p->icp.counts[ICP_DENIED] / p->stats.pings_acked > 95) { | |
| 1032 | debugs(15, DBG_CRITICAL, "Disabling cache_peer " << *p << | |
| 1033 | " because over 95% of its replies are UDP_DENIED"); | |
| 1034 | DeleteConfigured(p); | |
| 1035 | p = nullptr; | |
| 1036 | } else { | |
| 1037 | neighborCountIgnored(p); | |
| 1038 | } | |
| 1039 | } | |
| 1040 | } else if (opcode == ICP_MISS_NOFETCH) { | |
| 1041 | mem->ping_reply_callback(p, ntype, AnyP::PROTO_ICP, header, mem->ircb_data); | |
| 1042 | } else { | |
| 1043 | debugs(15, DBG_CRITICAL, "ERROR: neighborsUdpAck: Unexpected ICP reply: " << opcode_d); | |
| 1044 | } | |
| 1045 | } | |
| 1046 | ||
| 1047 | CachePeer * | |
| 1048 | findCachePeerByName(const char * const name) | |
| 1049 | { | |
| 1050 | for (const auto &p: CurrentCachePeers()) { | |
| 1051 | if (!strcasecmp(name, p->name)) | |
| 1052 | return p.get(); | |
| 1053 | } | |
| 1054 | return nullptr; | |
| 1055 | } | |
| 1056 | ||
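| | /// Whether the peer is considered alive: TCP must be up, IP addresses known, | |
| | /// and (unless no-query) no unanswered probe older than the dead-peer timeout. | |
| | /// For a TCP-down peer this also kicks off a background connect probe. | |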
| 1057 | int | |
| 1058 | neighborUp(const CachePeer * p) | |
| 1059 | { | |
| 1060 | if (!p->tcp_up) { | |
| 1061 | CallService(p->probeCodeContext, [&] { | |
| 1062 | peerProbeConnect(const_cast<CachePeer*>(p)); | |
| 1063 | }); | |
| 1064 | return 0; | |
| 1065 | } | |
| 1066 | ||
| 1067 | /* | |
| 1068 | * The CachePeer cannot be UP if we don't have any IP addresses | |
| 1069 | * for it. | |
| 1070 | */ | |
| 1071 | if (0 == p->n_addresses) { | |
| 1072 | debugs(15, 8, "DOWN (no-ip): " << *p); | |
| 1073 | return 0; | |
| 1074 | } | |
| 1075 | ||
| 1076 | if (p->options.no_query) { | |
| 1077 | debugs(15, 8, "UP (no-query): " << *p); | |
| 1078 | return 1; | |
| 1079 | } | |
| 1080 | ||
| 1081 | if (p->stats.probe_start != 0 && | |
| 1082 | squid_curtime - p->stats.probe_start > Config.Timeout.deadPeer) { | |
| 1083 | debugs(15, 8, "DOWN (dead): " << *p); | |
| 1084 | return 0; | |
| 1085 | } | |
| 1086 | ||
| 1087 | debugs(15, 8, "UP: " << *p); | |
| 1088 | return 1; | |
| 1089 | } | |
| 1090 | ||
| 1091 | time_t | |
| 1092 | positiveTimeout(const time_t timeout) | |
| 1093 | { | |
| 1094 | return max(static_cast<time_t>(1), timeout); | |
| 1095 | } | |
| 1096 | ||
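| | /// ipcache callback: stores up to PEER_MAX_ADDRESSES resolved addresses for | |
| | /// the peer, refreshes in_addr, and probes the peer to detect its state. | |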
| 1097 | static void | |
| 1098 | peerDNSConfigure(const ipcache_addrs *ia, const Dns::LookupDetails &, void *data) | |
| 1099 | { | |
| 1100 | // TODO: connections to no-longer valid IP addresses should be | |
| 1101 | // closed when we can detect such IP addresses. | |
| 1102 | ||
| 1103 | CachePeer *p = (CachePeer *)data; | |
| 1104 | ||
| 1105 | if (p->n_addresses == 0) { | |
| 1106 | debugs(15, Important(29), "Configuring " << neighborTypeStr(p) << " " << *p); | |
| 1107 | ||
| 1108 | if (p->type == PEER_MULTICAST) | |
| 1109 | debugs(15, DBG_IMPORTANT, " Multicast TTL = " << p->mcast.ttl); | |
| 1110 | } | |
| 1111 | ||
| 1112 | p->n_addresses = 0; | |
| 1113 | ||
| 1114 | if (ia == nullptr) { | |
| 1115 | debugs(0, DBG_CRITICAL, "WARNING: DNS lookup for '" << *p << "' failed!"); | |
| 1116 | return; | |
| 1117 | } | |
| 1118 | ||
| 1119 | if (ia->empty()) { | |
| 1120 | debugs(0, DBG_CRITICAL, "WARNING: No IP address found for '" << *p << "'!"); | |
| 1121 | return; | |
| 1122 | } | |
| 1123 | ||
| 1124 | for (const auto &ip: ia->goodAndBad()) { // TODO: Consider using just good(). | |
| 1125 | if (p->n_addresses < PEER_MAX_ADDRESSES) { | |
| 1126 | const auto idx = p->n_addresses++; | |
| 1127 | p->addresses[idx] = ip; | |
| 1128 | debugs(15, 2, "--> IP address #" << idx << ": " << p->addresses[idx]); | |
| 1129 | } else { | |
| 1130 | debugs(15, 3, "ignoring remaining " << (ia->size() - p->n_addresses) << " ips"); | |
| 1131 | break; | |
| 1132 | } | |
| 1133 | } | |
| 1134 | ||
| 1135 | p->in_addr.setEmpty(); | |
| 1136 | p->in_addr = p->addresses[0]; | |
| 1137 | p->in_addr.port(p->icp.port); | |
| 1138 | ||
| 1139 | peerProbeConnect(p, true); // detect any died or revived peers ASAP | |
| 1140 | ||
| 1141 | if (p->type == PEER_MULTICAST) | |
| 1142 | peerCountMcastPeersSchedule(p, 10); | |
| 1143 | ||
| 1144 | #if USE_ICMP | |
| 1145 | if (p->type != PEER_MULTICAST && IamWorkerProcess()) | |
| 1146 | if (!p->options.no_netdb_exchange) | |
| 1147 | eventAddIsh("netdbExchangeStart", netdbExchangeStart, p, 30.0, 1); | |
| 1148 | #endif | |
| 1149 | ||
| 1150 | if (p->standby.mgr.valid()) | |
| 1151 | PeerPoolMgr::Checkpoint(p->standby.mgr, "resolved peer"); | |
| 1152 | } | |
| 1153 | ||
| 1154 | static void | |
| 1155 | peerScheduleDnsRefreshCheck(const double delayInSeconds) | |
| 1156 | { | |
| 1157 | if (eventFind(peerDnsRefreshCheck, nullptr)) | |
| 1158 | eventDelete(peerDnsRefreshCheck, nullptr); | |
| 1159 | eventAddIsh("peerDnsRefreshCheck", peerDnsRefreshCheck, nullptr, delayInSeconds, 1); | |
| 1160 | } | |
| 1161 | ||
| 1162 | static void | |
| 1163 | peerDnsRefreshCheck(void *) | |
| 1164 | { | |
| 1165 | if (!statSawRecentRequests()) { | |
| 1166 | /* no recent client traffic, wait a bit */ | |
| 1167 | peerScheduleDnsRefreshCheck(180.0); | |
| 1168 | return; | |
| 1169 | } | |
| 1170 | ||
| 1171 | peerDnsRefreshStart(); | |
| 1172 | } | |
| 1173 | ||
| 1174 | static void | |
| 1175 | peerDnsRefreshStart() | |
| 1176 | { | |
| 1177 | const auto savedContext = CodeContext::Current(); | |
| 1178 | for (const auto &p: CurrentCachePeers()) { | |
| 1179 | CodeContext::Reset(p->probeCodeContext); | |
| 1180 | ipcache_nbgethostbyname(p->host, peerDNSConfigure, p.get()); | |
| 1181 | } | |
| 1182 | CodeContext::Reset(savedContext); | |
| 1183 | ||
| 1184 | peerScheduleDnsRefreshCheck(3600.0); | |
| 1185 | } | |
| 1186 | ||
| 1187 | /// whether new TCP probes are currently banned | |
| 1188 | static bool | |
| 1189 | peerProbeIsBusy(const CachePeer *p) | |
| 1190 | { | |
| 1191 | if (p->testing_now > 0) { | |
| 1192 | debugs(15, 8, "yes, probing " << p); | |
| 1193 | return true; | |
| 1194 | } | |
| 1195 | if (squid_curtime - p->stats.last_connect_probe == 0) { | |
| 1196 | debugs(15, 8, "yes, just probed " << p); | |
| 1197 | return true; | |
| 1198 | } | |
| 1199 | return false; | |
| 1200 | } | |
| 1201 | /* | |
| 1202 | * peerProbeConnect will be called on dead peers by neighborUp | |
| 1203 | */ | |
| 1204 | static void | |
| 1205 | peerProbeConnect(CachePeer *p, const bool reprobeIfBusy) | |
| 1206 | { | |
| 1207 | if (peerProbeIsBusy(p)) { | |
| 1208 | p->reprobe = reprobeIfBusy; | |
| 1209 | return; | |
| 1210 | } | |
| 1211 | p->reprobe = false; | |
| 1212 | ||
| 1213 | const auto ctimeout = p->connectTimeout(); | |
| 1214 | /* for each IP address of this CachePeer. find one that we can connect to and probe it. */ | |
| 1215 | for (int i = 0; i < p->n_addresses; ++i) { | |
| 1216 | Comm::ConnectionPointer conn = new Comm::Connection; | |
| 1217 | conn->remote = p->addresses[i]; | |
| 1218 | conn->remote.port(p->http_port); | |
| 1219 | conn->setPeer(p); | |
| 1220 | getOutgoingAddress(nullptr, conn); | |
| 1221 | ||
| 1222 | ++ p->testing_now; | |
| 1223 | ||
| 1224 | AsyncCall::Pointer call = commCbCall(15,3, "peerProbeConnectDone", CommConnectCbPtrFun(peerProbeConnectDone, p)); | |
| 1225 | Comm::ConnOpener *cs = new Comm::ConnOpener(conn, call, ctimeout); | |
| 1226 | cs->setHost(p->host); | |
| 1227 | AsyncJob::Start(cs); | |
| 1228 | } | |
| 1229 | ||
| 1230 | p->stats.last_connect_probe = squid_curtime; | |
| 1231 | } | |
| 1232 | ||
| 1233 | static void | |
| 1234 | peerProbeConnectDone(const Comm::ConnectionPointer &conn, Comm::Flag status, int, void *data) | |
| 1235 | { | |
| 1236 | CachePeer *p = (CachePeer*)data; | |
| 1237 | ||
| 1238 | if (status == Comm::OK) | |
| 1239 | p->noteSuccess(); | |
| 1240 | else | |
| 1241 | p->noteFailure(); | |
| 1242 | ||
| 1243 | -- p->testing_now; | |
| 1244 | conn->close(); | |
| 1245 | // TODO: log this traffic. | |
| 1246 | ||
| 1247 | if (p->reprobe) | |
| 1248 | peerProbeConnect(p); | |
| 1249 | } | |
| 1250 | ||
| 1251 | static void | |
| 1252 | peerCountMcastPeersSchedule(CachePeer * p, time_t when) | |
| 1253 | { | |
| 1254 | if (p->mcast.flags.count_event_pending) | |
| 1255 | return; | |
| 1256 | ||
| 1257 | eventAdd("peerCountMcastPeersStart", | |
| 1258 | peerCountMcastPeersStart, | |
| 1259 | p, | |
| 1260 | (double) when, 1); | |
| 1261 | ||
| 1262 | p->mcast.flags.count_event_pending = true; | |
| 1263 | } | |
| 1264 | ||
| 1265 | static void | |
| 1266 | peerCountMcastPeersStart(void *data) | |
| 1267 | { | |
| 1268 | const auto peer = static_cast<CachePeer*>(data); | |
| 1269 | CallContextCreator([peer] { | |
| 1270 | peerCountMcastPeersCreateAndSend(peer); | |
| 1271 | }); | |
| 1272 | peerCountMcastPeersSchedule(peer, MCAST_COUNT_RATE); | |
| 1273 | } | |
| 1274 | ||
| 1275 | /// initiates an ICP transaction to a multicast peer | |
| 1276 | static void | |
| 1277 | peerCountMcastPeersCreateAndSend(CachePeer * const p) | |
| 1278 | { | |
| 1279 | // XXX: Do not create lots of complex fake objects (while abusing their | |
| 1280 | // APIs) to pass around a few basic data points like start_ping and ping! | |
| 1281 | MemObject *mem; | |
| 1282 | int reqnum; | |
| 1283 | // TODO: use class AnyP::Uri instead of constructing and re-parsing a string | |
| 1284 | LOCAL_ARRAY(char, url, MAX_URL); | |
| 1285 | assert(p->type == PEER_MULTICAST); | |
| 1286 | p->mcast.flags.count_event_pending = false; | |
| 1287 | snprintf(url, MAX_URL, "http://"); | |
| 1288 | p->in_addr.toUrl(url+7, MAX_URL -8 ); | |
| 1289 | strcat(url, "/"); | |
| 1290 | const auto mx = MasterXaction::MakePortless<XactionInitiator::initPeerMcast>(); | |
| 1291 | auto *req = HttpRequest::FromUrlXXX(url, mx); | |
| 1292 | assert(req != nullptr); | |
| 1293 | const AccessLogEntry::Pointer ale = new AccessLogEntry; | |
| 1294 | ale->request = req; | |
| 1295 | CodeContext::Reset(ale); | |
| 1296 | StoreEntry *fake = storeCreateEntry(url, url, RequestFlags(), Http::METHOD_GET); | |
| 1297 | const auto psstate = new PeerSelector(nullptr); | |
| 1298 | psstate->request = req; | |
| 1299 | HTTPMSGLOCK(psstate->request); | |
| 1300 | psstate->entry = fake; | |
| 1301 | psstate->peerCountMcastPeerXXX = cbdataReference(p); | |
| 1302 | psstate->ping.start = current_time; | |
| 1303 | psstate->al = ale; | |
| 1304 | mem = fake->mem_obj; | |
| 1305 | mem->request = psstate->request; | |
| 1306 | mem->start_ping = current_time; | |
| 1307 | mem->ping_reply_callback = peerCountHandleIcpReply; | |
| 1308 | mem->ircb_data = psstate; | |
| 1309 | mcastSetTtl(icpOutgoingConn->fd, p->mcast.ttl); | |
| 1310 | p->mcast.id = mem->id; | |
| 1311 | reqnum = icpSetCacheKey((const cache_key *)fake->key); | |
| 1312 | icpCreateAndSend(ICP_QUERY, 0, url, reqnum, 0, | |
| 1313 | icpOutgoingConn->fd, p->in_addr, psstate->al); | |
| 1314 | fake->ping_status = PING_WAITING; // TODO: refactor to use PeerSelector::startPingWaiting() | |
| 1315 | eventAdd("peerCountMcastPeersDone", | |
| 1316 | peerCountMcastPeersDone, | |
| 1317 | psstate, | |
| 1318 | Config.Timeout.mcast_icp_query / 1000.0, 1); | |
| 1319 | p->mcast.flags.counting = true; | |
| 1320 | } | |
| 1321 | ||
| 1322 | static void | |
| 1323 | peerCountMcastPeersDone(void *data) | |
| 1324 | { | |
| 1325 | const auto psstate = static_cast<PeerSelector*>(data); | |
| 1326 | CallBack(psstate->al, [psstate] { | |
| 1327 | peerCountMcastPeersAbort(psstate); | |
| 1328 | delete psstate; | |
| 1329 | }); | |
| 1330 | } | |
| 1331 | ||
| 1332 | /// ends counting of multicast ICP replies | |
| 1333 | /// to the ICP query initiated by peerCountMcastPeersCreateAndSend() | |
| 1334 | static void | |
| 1335 | peerCountMcastPeersAbort(PeerSelector * const psstate) | |
| 1336 | { | |
| 1337 | StoreEntry *fake = psstate->entry; | |
| 1338 | ||
| 1339 | if (cbdataReferenceValid(psstate->peerCountMcastPeerXXX)) { | |
| 1340 | CachePeer *p = (CachePeer *)psstate->peerCountMcastPeerXXX; | |
| 1341 | p->mcast.flags.counting = false; | |
| 1342 | p->mcast.avg_n_members = Math::doubleAverage(p->mcast.avg_n_members, (double) psstate->ping.n_recv, ++p->mcast.n_times_counted, 10); | |
| 1343 | debugs(15, DBG_IMPORTANT, "Group " << *p << ": " << psstate->ping.n_recv << | |
| 1344 | " replies, "<< std::setw(4)<< std::setprecision(2) << | |
| 1345 | p->mcast.avg_n_members <<" average, RTT " << p->stats.rtt); | |
| 1346 | p->mcast.n_replies_expected = (int) p->mcast.avg_n_members; | |
| 1347 | } | |
| 1348 | ||
| 1349 | cbdataReferenceDone(psstate->peerCountMcastPeerXXX); | |
| 1350 | ||
| 1351 | fake->abort(); // sets ENTRY_ABORTED and initiates related cleanup | |
| 1352 | fake->mem_obj->request = nullptr; | |
| 1353 | fake->unlock("peerCountMcastPeersDone"); | |
| 1354 | } | |
| 1355 | ||
| 1356 | static void | |
| 1357 | peerCountHandleIcpReply(CachePeer * p, peer_t, AnyP::ProtocolType proto, void *, void *data) | |
| 1358 | { | |
| 1359 | const auto psstate = static_cast<PeerSelector*>(data); | |
| 1360 | StoreEntry *fake = psstate->entry; | |
| 1361 | assert(fake); | |
| 1362 | MemObject *mem = fake->mem_obj; | |
| 1363 | assert(mem); | |
| 1364 | int rtt = tvSubMsec(mem->start_ping, current_time); | |
| 1365 | assert(proto == AnyP::PROTO_ICP); | |
| 1366 | ++ psstate->ping.n_recv; | |
| 1367 | int rtt_av_factor = RTT_AV_FACTOR; | |
| 1368 | ||
| 1369 | if (p->options.weighted_roundrobin) | |
| 1370 | rtt_av_factor = RTT_BACKGROUND_AV_FACTOR; | |
| 1371 | ||
| 1372 | p->stats.rtt = Math::intAverage(p->stats.rtt, rtt, psstate->ping.n_recv, rtt_av_factor); | |
| 1373 | } | |
| 1374 | ||
| 1375 | static void | |
| 1376 | neighborDumpPeers(StoreEntry * sentry) | |
| 1377 | { | |
| 1378 | dump_peers(sentry, Config.peers); | |
| 1379 | } | |
| 1380 | ||
| 1381 | void | |
| 1382 | dump_peer_options(StoreEntry * sentry, CachePeer * p) | |
| 1383 | { | |
| 1384 | PackableStream os(*sentry); | |
| 1385 | ||
| 1386 | if (p->options.proxy_only) | |
| 1387 | os << " proxy-only"; | |
| 1388 | ||
| 1389 | if (p->options.no_query) | |
| 1390 | os << " no-query"; | |
| 1391 | ||
| 1392 | if (p->options.background_ping) | |
| 1393 | os << " background-ping"; | |
| 1394 | ||
| 1395 | if (p->options.no_digest) | |
| 1396 | os << " no-digest"; | |
| 1397 | ||
| 1398 | if (p->options.default_parent) | |
| 1399 | os << " default"; | |
| 1400 | ||
| 1401 | if (p->options.roundrobin) | |
| 1402 | os << " round-robin"; | |
| 1403 | ||
| 1404 | if (p->options.carp) | |
| 1405 | os << " carp"; | |
| 1406 | ||
| 1407 | #if USE_AUTH | |
| 1408 | if (p->options.userhash) | |
| 1409 | os << " userhash"; | |
| 1410 | #endif | |
| 1411 | ||
| 1412 | if (p->options.sourcehash) | |
| 1413 | os << " sourcehash"; | |
| 1414 | ||
| 1415 | if (p->options.weighted_roundrobin) | |
| 1416 | os << " weighted-round-robin"; | |
| 1417 | ||
| 1418 | if (p->options.mcast_responder) | |
| 1419 | os << " multicast-responder"; | |
| 1420 | ||
| 1421 | if (p->options.mcast_siblings) | |
| 1422 | os << " multicast-siblings"; | |
| 1423 | ||
| 1424 | if (p->weight != 1) | |
| 1425 | os << " weight=" << p->weight; | |
| 1426 | ||
| 1427 | if (p->options.closest_only) | |
| 1428 | os << " closest-only"; | |
| 1429 | ||
| 1430 | #if USE_HTCP | |
| 1431 | if (p->options.htcp) { | |
| 1432 | os << " htcp"; | |
| 1433 | std::vector<const char *, PoolingAllocator<const char *> > opts; | |
| 1434 | if (p->options.htcp_oldsquid) | |
| 1435 | opts.push_back("oldsquid"); | |
| 1436 | if (p->options.htcp_no_clr) | |
| 1437 | opts.push_back("no-clr"); | |
| 1438 | if (p->options.htcp_no_purge_clr) | |
| 1439 | opts.push_back("no-purge-clr"); | |
| 1440 | if (p->options.htcp_only_clr) | |
| 1441 | opts.push_back("only-clr"); | |
| 1442 | if (p->options.htcp_forward_clr) | |
| 1443 | opts.push_back("forward-clr"); | |
| 1444 | os << AsList(opts).prefixedBy("=").delimitedBy(","); | |
| 1445 | } | |
| 1446 | #endif | |
| 1447 | ||
| 1448 | if (p->options.no_netdb_exchange) | |
| 1449 | os << " no-netdb-exchange"; | |
| 1450 | ||
| 1451 | #if USE_DELAY_POOLS | |
| 1452 | if (p->options.no_delay) | |
| 1453 | os << " no-delay"; | |
| 1454 | #endif | |
| 1455 | ||
| 1456 | if (p->login) | |
| 1457 | os << " login=" << p->login; | |
| 1458 | ||
| 1459 | if (p->mcast.ttl > 0) | |
| 1460 | os << " ttl=" << p->mcast.ttl; | |
| 1461 | ||
| 1462 | if (p->connect_timeout_raw > 0) | |
| 1463 | os << " connect-timeout=" << p->connect_timeout_raw; | |
| 1464 | ||
| 1465 | if (p->connect_fail_limit != PEER_TCP_MAGIC_COUNT) | |
| 1466 | os << " connect-fail-limit=" << p->connect_fail_limit; | |
| 1467 | ||
| 1468 | #if USE_CACHE_DIGESTS | |
| 1469 | ||
| 1470 | if (p->digest_url) | |
| 1471 | os << " digest-url=" << p->digest_url; | |
| 1472 | ||
| 1473 | #endif | |
| 1474 | ||
| 1475 | if (p->options.allow_miss) | |
| 1476 | os << " allow-miss"; | |
| 1477 | ||
| 1478 | if (p->options.no_tproxy) | |
| 1479 | os << " no-tproxy"; | |
| 1480 | ||
| 1481 | if (p->max_conn > 0) | |
| 1482 | os << " max-conn=" << p->max_conn; | |
| 1483 | ||
| 1484 | if (p->standby.limit > 0) | |
| 1485 | os << " standby=" << p->standby.limit; | |
| 1486 | ||
| 1487 | if (p->options.originserver) | |
| 1488 | os << " originserver"; | |
| 1489 | ||
| 1490 | if (p->domain) | |
| 1491 | os << " forceddomain=" << p->domain; | |
| 1492 | ||
| 1493 | if (p->connection_auth == 0) | |
| 1494 | os << " connection-auth=off"; | |
| 1495 | else if (p->connection_auth == 1) | |
| 1496 | os << " connection-auth=on"; | |
| 1497 | else if (p->connection_auth == 2) | |
| 1498 | os << " connection-auth=auto"; | |
| 1499 | ||
| 1500 | p->secure.dumpCfg(os, "tls-"); | |
| 1501 | os << '\n'; | |
| 1502 | } | |
| 1503 | ||
| 1504 | static void | |
| 1505 | dump_peers(StoreEntry *sentry, CachePeers *peers) | |
| 1506 | { | |
| 1507 | char ntoabuf[MAX_IPSTRLEN]; | |
| 1508 | int i; | |
| 1509 | ||
| 1510 | if (!peers) { | |
| 1511 | storeAppendPrintf(sentry, "There are no neighbors installed.\n"); | |
| 1512 | return; | |
| 1513 | } | |
| 1514 | ||
| 1515 | for (const auto &peer: *peers) { | |
| 1516 | const auto e = peer.get(); | |
| 1517 | assert(e->host != nullptr); | |
| 1518 | storeAppendPrintf(sentry, "\n%-11.11s: %s\n", | |
| 1519 | neighborTypeStr(e), | |
| 1520 | e->name); | |
| 1521 | storeAppendPrintf(sentry, "Host : %s/%d/%d\n", | |
| 1522 | e->host, | |
| 1523 | e->http_port, | |
| 1524 | e->icp.port); | |
| 1525 | storeAppendPrintf(sentry, "Flags :"); | |
| 1526 | dump_peer_options(sentry, e); | |
| 1527 | ||
| 1528 | for (i = 0; i < e->n_addresses; ++i) { | |
| 1529 | storeAppendPrintf(sentry, "Address[%d] : %s\n", i, | |
| 1530 | e->addresses[i].toStr(ntoabuf,MAX_IPSTRLEN) ); | |
| 1531 | } | |
| 1532 | ||
| 1533 | storeAppendPrintf(sentry, "Status : %s\n", | |
| 1534 | neighborUp(e) ? "Up" : "Down"); | |
| 1535 | storeAppendPrintf(sentry, "FETCHES : %d\n", e->stats.fetches); | |
| 1536 | storeAppendPrintf(sentry, "OPEN CONNS : %d\n", e->stats.conn_open); | |
| 1537 | storeAppendPrintf(sentry, "AVG RTT : %d msec\n", e->stats.rtt); | |
| 1538 | ||
| 1539 | if (!e->options.no_query) { | |
| 1540 | storeAppendPrintf(sentry, "LAST QUERY : %8d seconds ago\n", | |
| 1541 | (int) (squid_curtime - e->stats.last_query)); | |
| 1542 | ||
| 1543 | if (e->stats.last_reply > 0) | |
| 1544 | storeAppendPrintf(sentry, "LAST REPLY : %8d seconds ago\n", | |
| 1545 | (int) (squid_curtime - e->stats.last_reply)); | |
| 1546 | else | |
| 1547 | storeAppendPrintf(sentry, "LAST REPLY : none received\n"); | |
| 1548 | ||
| 1549 | storeAppendPrintf(sentry, "PINGS SENT : %8d\n", e->stats.pings_sent); | |
| 1550 | ||
| 1551 | storeAppendPrintf(sentry, "PINGS ACKED: %8d %3d%%\n", | |
| 1552 | e->stats.pings_acked, | |
| 1553 | Math::intPercent(e->stats.pings_acked, e->stats.pings_sent)); | |
| 1554 | } | |
| 1555 | ||
| 1556 | storeAppendPrintf(sentry, "IGNORED : %8d %3d%%\n", e->stats.ignored_replies, Math::intPercent(e->stats.ignored_replies, e->stats.pings_acked)); | |
| 1557 | ||
| 1558 | if (!e->options.no_query) { | |
| 1559 | storeAppendPrintf(sentry, "Histogram of PINGS ACKED:\n"); | |
| 1560 | #if USE_HTCP | |
| 1561 | ||
| 1562 | if (e->options.htcp) { | |
| 1563 | storeAppendPrintf(sentry, "\tMisses\t%8d %3d%%\n", | |
| 1564 | e->htcp.counts[0], | |
| 1565 | Math::intPercent(e->htcp.counts[0], e->stats.pings_acked)); | |
| 1566 | storeAppendPrintf(sentry, "\tHits\t%8d %3d%%\n", | |
| 1567 | e->htcp.counts[1], | |
| 1568 | Math::intPercent(e->htcp.counts[1], e->stats.pings_acked)); | |
| 1569 | } else { | |
| 1570 | #endif | |
| 1571 | ||
| 1572 | for (auto op : WholeEnum<icp_opcode>()) { | |
| 1573 | if (e->icp.counts[op] == 0) | |
| 1574 | continue; | |
| 1575 | ||
| 1576 | storeAppendPrintf(sentry, " %12.12s : %8d %3d%%\n", | |
| 1577 | icp_opcode_str[op], | |
| 1578 | e->icp.counts[op], | |
| 1579 | Math::intPercent(e->icp.counts[op], e->stats.pings_acked)); | |
| 1580 | } | |
| 1581 | ||
| 1582 | #if USE_HTCP | |
| 1583 | ||
| 1584 | } | |
| 1585 | ||
| 1586 | #endif | |
| 1587 | ||
| 1588 | } | |
| 1589 | ||
| 1590 | if (e->stats.last_connect_failure) { | |
| 1591 | storeAppendPrintf(sentry, "Last failed connect() at: %s\n", | |
| 1592 | Time::FormatHttpd(e->stats.last_connect_failure)); | |
| 1593 | } | |
| 1594 | ||
| 1595 | storeAppendPrintf(sentry, "keep-alive ratio: %d%%\n", Math::intPercent(e->stats.n_keepalives_recv, e->stats.n_keepalives_sent)); | |
| 1596 | } | |
| 1597 | } | |
| 1598 | ||
| 1599 | #if USE_HTCP | |
| 1600 | void | |
| 1601 | neighborsHtcpReply(const cache_key * key, HtcpReplyData * htcp, const Ip::Address &from) | |
| 1602 | { | |
| 1603 | StoreEntry *e = Store::Root().findCallbackXXX(key); | |
| 1604 | MemObject *mem = nullptr; | |
| 1605 | CachePeer *p; | |
| 1606 | peer_t ntype = PEER_NONE; | |
| 1607 | debugs(15, 6, "neighborsHtcpReply: " << | |
| 1608 | (htcp->hit ? "HIT" : "MISS") << " " << | |
| 1609 | storeKeyText(key) ); | |
| 1610 | ||
| 1611 | if (nullptr != e) | |
| 1612 | mem = e->mem_obj; | |
| 1613 | ||
| 1614 | if ((p = whichPeer(from))) | |
| 1615 | neighborAliveHtcp(p, mem, htcp); | |
| 1616 | ||
| 1617 | /* Does the entry exist? */ | |
| 1618 | if (nullptr == e) { | |
| 1619 | debugs(12, 3, "neighborsHtcpReply: Cache key '" << storeKeyText(key) << "' not found"); | |
| 1620 | neighborCountIgnored(p); | |
| 1621 | return; | |
| 1622 | } | |
| 1623 | ||
| 1624 | /* check if someone is already fetching it */ | |
| 1625 | if (EBIT_TEST(e->flags, ENTRY_DISPATCHED)) { | |
| 1626 | debugs(15, 3, "neighborsUdpAck: '" << storeKeyText(key) << "' already being fetched."); | |
| 1627 | neighborCountIgnored(p); | |
| 1628 | return; | |
| 1629 | } | |
| 1630 | ||
| 1631 | if (mem == nullptr) { | |
| 1632 | debugs(15, 2, "Ignoring reply for missing mem_obj: " << storeKeyText(key)); | |
| 1633 | neighborCountIgnored(p); | |
| 1634 | return; | |
| 1635 | } | |
| 1636 | ||
| 1637 | if (e->ping_status != PING_WAITING) { | |
| 1638 | debugs(15, 2, "neighborsUdpAck: Entry " << storeKeyText(key) << " is not PING_WAITING"); | |
| 1639 | neighborCountIgnored(p); | |
| 1640 | return; | |
| 1641 | } | |
| 1642 | ||
| 1643 | if (!e->locked()) { | |
| 1644 | // TODO: many entries are unlocked; why is this reported at level 1? | |
| 1645 | debugs(12, DBG_IMPORTANT, "neighborsUdpAck: '" << storeKeyText(key) << "' has no locks"); | |
| 1646 | neighborCountIgnored(p); | |
| 1647 | return; | |
| 1648 | } | |
| 1649 | ||
| 1650 | if (!mem->ircb_data) { | |
| 1651 | debugs(12, DBG_IMPORTANT, "ERROR: Squid BUG: missing HTCP callback data for " << *e); | |
| 1652 | neighborCountIgnored(p); | |
| 1653 | return; | |
| 1654 | } | |
| 1655 | ||
| 1656 | if (p) { | |
| 1657 | ntype = neighborType(p, mem->request->url); | |
| 1658 | neighborUpdateRtt(p, mem); | |
| 1659 | } | |
| 1660 | ||
| 1661 | if (ignoreMulticastReply(p, mem->ircb_data)) { | |
| 1662 | neighborCountIgnored(p); | |
| 1663 | return; | |
| 1664 | } | |
| 1665 | ||
| 1666 | debugs(15, 3, "neighborsHtcpReply: e = " << e); | |
| 1667 | // TODO: Refactor (ping_reply_callback,ircb_data) to add CodeContext. | |
| 1668 | mem->ping_reply_callback(p, ntype, AnyP::PROTO_HTCP, htcp, mem->ircb_data); | |
| 1669 | } | |
| 1670 | ||
| 1671 | /* | |
| 1672 | * Send HTCP CLR messages to all peers configured to receive them. | |
| 1673 | */ | |
| 1674 | void | |
| 1675 | neighborsHtcpClear(StoreEntry * e, HttpRequest * req, const HttpRequestMethod &method, htcp_clr_reason reason) | |
| 1676 | { | |
| 1677 | char buf[128]; | |
| 1678 | ||
| 1679 | for (const auto &p: CurrentCachePeers()) { | |
| 1680 | if (!p->options.htcp) { | |
| 1681 | continue; | |
| 1682 | } | |
| 1683 | if (p->options.htcp_no_clr) { | |
| 1684 | continue; | |
| 1685 | } | |
| 1686 | if (p->options.htcp_no_purge_clr && reason == HTCP_CLR_PURGE) { | |
| 1687 | continue; | |
| 1688 | } | |
| 1689 | debugs(15, 3, "neighborsHtcpClear: sending CLR to " << p->in_addr.toUrl(buf, 128)); | |
| 1690 | htcpClear(e, req, method, p.get(), reason); | |
| 1691 | } | |
| 1692 | } | |
| 1693 | ||
| 1694 | #endif | |
| 1695 |