]>
Commit | Line | Data |
---|---|---|
55622953 | 1 | /* |
f70aedc4 | 2 | * Copyright (C) 1996-2021 The Squid Software Foundation and contributors |
55622953 CT |
3 | * |
4 | * Squid software is distributed under GPLv2+ license and includes | |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
7 | */ | |
8 | ||
9 | #include "squid.h" | |
10 | #include "AccessLogEntry.h" | |
99ecc6a4 | 11 | #include "base/CodeContext.h" |
55622953 | 12 | #include "CachePeer.h" |
55622953 | 13 | #include "errorpage.h" |
a70e75b7 | 14 | #include "FwdState.h" |
55622953 CT |
15 | #include "HappyConnOpener.h" |
16 | #include "HttpRequest.h" | |
17 | #include "ip/QosConfig.h" | |
18 | #include "neighbors.h" | |
19 | #include "pconn.h" | |
20 | #include "PeerPoolMgr.h" | |
55622953 CT |
21 | #include "SquidConfig.h" |
22 | ||
CBDATA_CLASS_INIT(HappyConnOpener); // cbdata registration for HappyConnOpener callback pointers
24 | ||
25 | // HappyOrderEnforcer optimizes enforcement of the "pause before opening a spare | |
26 | // connection" requirements. Its inefficient alternative would add hundreds of | |
27 | // concurrent events to the Squid event queue in many busy configurations, one | |
28 | // concurrent event per concurrent HappyConnOpener job. | |
29 | // | |
30 | // EventScheduler::schedule() uses linear search to find the right place for a | |
31 | // new event; having hundreds of concurrent events is prohibitively expensive. | |
32 | // Both alternatives may have comparable high rate of eventAdd() calls, but | |
33 | // HappyOrderEnforcer usually schedules the first or second event (as opposed to | |
34 | // events that would be fired after hundreds of already scheduled events, making | |
35 | // that linear search a lot longer). | |
36 | // | |
37 | // EventScheduler::cancel() also uses linear search. HappyOrderEnforcer does not | |
38 | // need to cancel scheduled events, while its inefficient alternative may cancel | |
39 | // events at a rate comparable to the high eventAdd() rate -- many events would | |
40 | // be scheduled in vain because external factors would speed up (or make | |
41 | // unnecessary) spare connection attempts, canceling the wait. | |
42 | // | |
43 | // This optimization is possible only where each job needs to pause for the same | |
44 | // amount of time, creating a naturally ordered list of jobs waiting to be | |
45 | // resumed. This is why two HappyOrderEnforcers are needed to efficiently honor | |
46 | // both happy_eyeballs_connect_timeout and happy_eyeballs_connect_gap | |
47 | // directives. | |
48 | ||
/// Efficiently drains a FIFO HappyConnOpener queue while delaying each "pop"
/// event by the time determined by the top element currently in the queue. Its
/// current cbdata-free implementation assumes static storage duration.
class HappyOrderEnforcer
{
public:
    /// \param aName names scheduled events, for debugging
    HappyOrderEnforcer(const char *aName): name(aName) {}

    /// resumes jobs that need resuming (if any)
    void checkpoint();

    /// starts managing the job's wait; the job should expect a call back
    void enqueue(HappyConnOpener &);

    /// stops managing the job's wait; cancels the pending callback, if any
    void dequeue(HappyConnOpener &);

    const char * const name; ///< waiting event name, for debugging

protected:
    /// \returns whether the given job no longer needs to wait
    virtual bool readyNow(const HappyConnOpener &) const = 0;

    /// resumes the given job; \returns the scheduled notification call
    virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) = 0;

    /// whether a NoteWaitOver() event is currently scheduled
    bool waiting() const { return waitEnd_ > 0; }

    /// schedules a NoteWaitOver() event when a wait is still needed;
    /// \returns whether the caller must keep waiting
    bool startedWaiting(const HappyAbsoluteTime lastStart, const int cfgTimeoutMsec) const;

private:
    /// eventAdd() entry point; forwards to noteWaitOver()
    static void NoteWaitOver(void *raw);
    void noteWaitOver();

    HappySpareWaitList jobs_; ///< queued jobs waiting their turn
    mutable HappyAbsoluteTime waitEnd_ = 0; ///< expected NoteWaitOver() call time (or zero)
};
83 | ||
84 | std::ostream &operator <<(std::ostream &os, const HappyConnOpenerAnswer &answer) | |
85 | { | |
86 | if (answer.error.set()) | |
87 | os << "bad "; | |
88 | if (answer.conn) | |
89 | os << (answer.reused ? "reused " : "new ") << answer.conn; | |
90 | if (answer.n_tries != 1) | |
91 | os << " after " << answer.n_tries; | |
92 | return os; | |
93 | } | |
94 | ||
/// enforces happy_eyeballs_connect_timeout
class PrimeChanceGiver: public HappyOrderEnforcer
{
public:
    PrimeChanceGiver(): HappyOrderEnforcer("happy_eyeballs_connect_timeout enforcement") {}

    /* HappyOrderEnforcer API */
    virtual bool readyNow(const HappyConnOpener &job) const override;

private:
    /* HappyOrderEnforcer API */
    virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) override;
};
108 | ||
/// enforces happy_eyeballs_connect_gap and happy_eyeballs_connect_limit
class SpareAllowanceGiver: public HappyOrderEnforcer
{
public:
    SpareAllowanceGiver(): HappyOrderEnforcer("happy_eyeballs_connect_gap/happy_eyeballs_connect_limit enforcement") {}

    /* HappyOrderEnforcer API */
    virtual bool readyNow(const HappyConnOpener &job) const override;

    /// reacts to HappyConnOpener discovering readyNow() conditions for a spare path
    /// the caller must attempt to open a spare connection immediately
    void jobGotInstantAllowance();

    /// reacts to HappyConnOpener getting a spare connection opening result
    void jobUsedAllowance();

    /// reacts to HappyConnOpener dropping its spare connection allowance
    void jobDroppedAllowance();

private:
    /* HappyOrderEnforcer API */
    virtual AsyncCall::Pointer notify(const CbcPointer<HappyConnOpener> &) override;

    /// whether opening a spare connection now would violate the concurrency limit
    bool concurrencyLimitReached() const;

    /// accounts for one given allowance
    void recordAllowance();

    /// undoes one recordAllowance() effect
    void forgetAllowance();

    /// the time of the last noteSpareAllowance() call
    HappyAbsoluteTime lastAllowanceStart = 0;

    /// the number of noteSpareAllowance() calls not already
    /// returned via jobUsedAllowance() or jobDroppedAllowance()
    int concurrencyLevel = 0;
};
143 | ||
/// the single PrimeChanceGiver instance, shared by all HappyConnOpener jobs
PrimeChanceGiver ThePrimeChanceGiver;
/// the single SpareAllowanceGiver instance, shared by all HappyConnOpener jobs
SpareAllowanceGiver TheSpareAllowanceGiver;
146 | ||
147 | /* HappyOrderEnforcer */ | |
148 | ||
149 | void | |
150 | HappyOrderEnforcer::enqueue(HappyConnOpener &job) | |
151 | { | |
152 | Must(!job.spareWaiting.callback); | |
153 | jobs_.emplace_back(&job); | |
154 | job.spareWaiting.position = std::prev(jobs_.end()); | |
99ecc6a4 | 155 | job.spareWaiting.codeContext = CodeContext::Current(); |
55622953 CT |
156 | } |
157 | ||
158 | void | |
159 | HappyOrderEnforcer::dequeue(HappyConnOpener &job) | |
160 | { | |
161 | if (job.spareWaiting.callback) { | |
162 | job.spareWaiting.callback->cancel("HappyOrderEnforcer::dequeue"); | |
163 | job.spareWaiting.callback = nullptr; | |
164 | } else { | |
165 | Must(!jobs_.empty()); | |
166 | jobs_.erase(job.spareWaiting.position); | |
167 | } | |
168 | } | |
169 | ||
/// resumes jobs that need resuming (if any)
void
HappyOrderEnforcer::checkpoint()
{
    while (!jobs_.empty()) {
        if (const auto jobPtr = jobs_.front().valid()) {
            auto &job = *jobPtr;
            if (!readyNow(job))
                break; // the next job cannot be ready earlier (FIFO)
            // resume the job under the CodeContext it was queued with
            CallBack(job.spareWaiting.codeContext, [&] {
                job.spareWaiting.callback = notify(jobPtr); // and fall through to the next job
            });
        }
        jobs_.pop_front(); // gone (i.e. invalid) jobs are simply dropped
    }
}
185 | ||
/// Computes the end of the mandatory wait (based on the last wait start and
/// the configured timeout) and, if that end is still in the future, makes sure
/// a NoteWaitOver() event is scheduled no later than that.
/// \returns whether the caller must keep waiting
bool
HappyOrderEnforcer::startedWaiting(const HappyAbsoluteTime lastStart, const int cfgTimeoutMsec) const
{
    // Normally, the job would not even be queued if there is no timeout. This
    // check handles reconfiguration that happened after this job was queued.
    if (cfgTimeoutMsec <= 0)
        return false;

    // convert to seconds and adjust for SMP workers to keep aggregated load in
    // check despite the lack of coordination among workers
    const auto tout = static_cast<HappyAbsoluteTime>(cfgTimeoutMsec) * Config.workers / 1000.0;
    // std::min() guards against lastStart time jumping into the future
    const auto newWaitEnd = std::min(lastStart, current_dtime) + tout;
    if (newWaitEnd <= current_dtime)
        return false; // no need to wait

    // We cannot avoid event accumulation because calling eventDelete() is
    // unsafe, but any accumulation will be small because it can only be caused
    // by hot reconfiguration changes or current time jumps.
    if (!waiting() || newWaitEnd < waitEnd_) {
        const auto waitTime = newWaitEnd - current_dtime;
        eventAdd(name, &HappyOrderEnforcer::NoteWaitOver, const_cast<HappyOrderEnforcer*>(this), waitTime, 0, false);
        waitEnd_ = newWaitEnd;
        assert(waiting());
    }

    return true;
}
213 | ||
214 | void | |
215 | HappyOrderEnforcer::NoteWaitOver(void *raw) | |
216 | { | |
217 | assert(raw); | |
218 | static_cast<HappyOrderEnforcer*>(raw)->noteWaitOver(); | |
219 | } | |
220 | ||
/// restores the "no scheduled event implies zero waitEnd_" invariant and
/// resumes any jobs whose wait has ended
void
HappyOrderEnforcer::noteWaitOver()
{
    Must(waiting());
    waitEnd_ = 0; // the scheduled event has fired; none is pending now
    checkpoint();
}
228 | ||
229 | /* PrimeChanceGiver */ | |
230 | ||
231 | bool | |
232 | PrimeChanceGiver::readyNow(const HappyConnOpener &job) const | |
233 | { | |
234 | return !startedWaiting(job.primeStart, Config.happyEyeballs.connect_timeout); | |
235 | } | |
236 | ||
/// schedules a noteGavePrimeItsChance() call for the job whose wait is over
AsyncCall::Pointer
PrimeChanceGiver::notify(const CbcPointer<HappyConnOpener> &job)
{
    return CallJobHere(17, 5, job, HappyConnOpener, noteGavePrimeItsChance);
}
242 | ||
243 | /* SpareAllowanceGiver */ | |
244 | ||
245 | bool | |
246 | SpareAllowanceGiver::readyNow(const HappyConnOpener &) const | |
247 | { | |
248 | return !concurrencyLimitReached() && | |
249 | !startedWaiting(lastAllowanceStart, Config.happyEyeballs.connect_gap); | |
250 | } | |
251 | ||
/// grants the allowance and schedules a noteSpareAllowance() call for the job
AsyncCall::Pointer
SpareAllowanceGiver::notify(const CbcPointer<HappyConnOpener> &job)
{
    recordAllowance(); // the notified job receives its allowance now
    return CallJobHere(17, 5, job, HappyConnOpener, noteSpareAllowance);
}
258 | ||
/// reacts to HappyConnOpener discovering readyNow() conditions for a spare path;
/// the caller must attempt to open a spare connection immediately
void
SpareAllowanceGiver::jobGotInstantAllowance()
{
    recordAllowance();
}
264 | ||
/// reacts to HappyConnOpener getting a spare connection opening result
void
SpareAllowanceGiver::jobUsedAllowance()
{
    forgetAllowance();
}
270 | ||
/// reacts to HappyConnOpener dropping its spare connection allowance
void
SpareAllowanceGiver::jobDroppedAllowance()
{
    // Without happy_eyeballs_connect_gap, lastAllowanceStart does not matter.
    // Otherwise, the dropped allowance ought to be the last one, and since it
    // was allowed, we would still observe the gap even if we do not wait now.
    lastAllowanceStart = 0;

    forgetAllowance();
}
281 | ||
282 | /// account for the given allowance | |
283 | void | |
284 | SpareAllowanceGiver::recordAllowance() | |
285 | { | |
286 | ++concurrencyLevel; | |
287 | lastAllowanceStart = current_dtime; | |
288 | // not a checkpoint(): no other spare can become ready here | |
289 | } | |
290 | ||
/// undoes a recordAllowance() effect
void
SpareAllowanceGiver::forgetAllowance()
{
    Must(concurrencyLevel);
    --concurrencyLevel;
    checkpoint(); // a queued job may become ready now that the level dropped
}
298 | ||
299 | /// whether opening a spare connection now would violate happy_eyeballs_connect_limit | |
300 | bool | |
301 | SpareAllowanceGiver::concurrencyLimitReached() const | |
302 | { | |
303 | if (Config.happyEyeballs.connect_limit < 0) | |
304 | return false; // no limit | |
305 | ||
306 | if (Config.happyEyeballs.connect_limit == 0) | |
307 | return true; // concurrent spares prohibited regardless of spare level | |
308 | ||
309 | // adjust for SMP workers to keep aggregated spare level in check despite | |
310 | // the lack of coordination among workers | |
311 | const auto aggregateLevel = concurrencyLevel * Config.workers; | |
312 | return aggregateLevel >= Config.happyEyeballs.connect_limit; | |
313 | } | |
314 | ||
315 | /* HappyConnOpenerAnswer */ | |
316 | ||
HappyConnOpenerAnswer::~HappyConnOpenerAnswer()
{
    // XXX: When multiple copies of an answer exist, this delete in one copy
    // invalidates error in other copies -- their error.get() returns nil. The
    // current code "works", but probably only because the initiator gets the
    // error before any answer copies are deleted. Same in ~EncryptorAnswer.
    delete error.get();
}
325 | ||
326 | /* HappyConnOpener */ | |
327 | ||
HappyConnOpener::HappyConnOpener(const ResolvedPeers::Pointer &dests, const AsyncCall::Pointer &aCall, HttpRequest::Pointer &request, const time_t aFwdStart, int tries, const AccessLogEntry::Pointer &anAle):
    AsyncJob("HappyConnOpener"),
    fwdStart(aFwdStart),
    callback_(aCall),
    destinations(dests),
    ale(anAle),
    cause(request),
    n_tries(tries)
{
    assert(destinations);
    // the callback dialer must be an Answer so that we can fill it in later
    assert(dynamic_cast<Answer*>(callback_->getDialer()));
}
340 | ||
HappyConnOpener::~HappyConnOpener()
{
    safe_free(host_);
    delete lastError; // nil if ownership was transferred to an answer
}
346 | ||
347 | void | |
348 | HappyConnOpener::setHost(const char *h) | |
349 | { | |
350 | safe_free(host_); | |
351 | if (h) | |
352 | host_ = xstrdup(h); | |
353 | } | |
354 | ||
/* AsyncJob API */
void
HappyConnOpener::start()
{
    destinations->notificationPending = false; // consume any pending notification
    checkForNewConnection();
}
361 | ||
/// whether the job has nothing left to do (AsyncJob API)
bool
HappyConnOpener::doneAll() const
{
    if (!callback_)
        return true; // (probably found a good path and) informed the requestor

    // TODO: Expose AsyncCall::canFire() instead so that code like this can
    // detect gone initiators without the need to explicitly cancel callbacks.
    if (callback_->canceled())
        return true; // the requestor is gone or has lost interest

    if (prime || spare)
        return false; // an attempt is still in progress

    if (ranOutOfTimeOrAttempts())
        return true; // trying new connection paths prohibited

    if (destinations->empty() && destinations->destinationsFinalized)
        return true; // there are no more paths to try

    return false;
}
384 | ||
/* AsyncJob API: final cleanup, run exactly once before the job is destroyed */
void
HappyConnOpener::swanSong()
{
    debugs(17, 5, this);

    // tell the still-interested initiator that we failed to find a connection
    if (callback_ && !callback_->canceled())
        sendFailure();

    if (spareWaiting)
        cancelSpareWait("HappyConnOpener object destructed");

    // TODO: Find an automated, faster way to kill no-longer-needed jobs.

    if (prime) {
        cancelAttempt(prime, "job finished during a prime attempt");
    }

    if (spare) {
        cancelAttempt(spare, "job finished during a spare attempt");
        if (gotSpareAllowance) {
            // return the unused allowance so that other jobs may use it
            TheSpareAllowanceGiver.jobDroppedAllowance();
            gotSpareAllowance = false;
        }
    }

    AsyncJob::swanSong();
}
412 | ||
413 | const char * | |
414 | HappyConnOpener::status() const | |
415 | { | |
416 | static MemBuf buf; | |
417 | buf.reset(); | |
418 | ||
419 | buf.append(" [", 2); | |
420 | if (stopReason) | |
421 | buf.appendf("Stopped, reason:%s", stopReason); | |
422 | if (prime) { | |
423 | if (prime.path && prime.path->isOpen()) | |
424 | buf.appendf(" prime FD %d", prime.path->fd); | |
425 | else if (prime.connector) | |
426 | buf.appendf(" prime call%ud", prime.connector->id.value); | |
427 | } | |
428 | if (spare) { | |
429 | if (spare.path && spare.path->isOpen()) | |
430 | buf.appendf(" spare FD %d", spare.path->fd); | |
431 | else if (spare.connector) | |
432 | buf.appendf(" spare call%ud", spare.connector->id.value); | |
433 | } | |
434 | if (n_tries) | |
435 | buf.appendf(" tries %d", n_tries); | |
436 | buf.appendf(" %s%u]", id.prefix(), id.value); | |
437 | buf.terminate(); | |
438 | ||
439 | return buf.content(); | |
440 | } | |
441 | ||
442 | /// Create "503 Service Unavailable" or "504 Gateway Timeout" error depending | |
443 | /// on whether this is a validation request. RFC 7234 section 5.2.2 says that | |
444 | /// we MUST reply with "504 Gateway Timeout" if validation fails and cached | |
445 | /// reply has proxy-revalidate, must-revalidate or s-maxage Cache-Control | |
446 | /// directive. | |
447 | ErrorState * | |
448 | HappyConnOpener::makeError(const err_type type) const | |
449 | { | |
450 | const auto statusCode = cause->flags.needValidation ? | |
451 | Http::scGatewayTimeout : Http::scServiceUnavailable; | |
452 | return new ErrorState(type, statusCode, cause.getRaw(), ale); | |
453 | } | |
454 | ||
455 | /// \returns pre-filled Answer if the initiator needs an answer (or nil) | |
456 | HappyConnOpener::Answer * | |
9b7992d9 | 457 | HappyConnOpener::futureAnswer(const PeerConnectionPointer &conn) |
55622953 CT |
458 | { |
459 | if (callback_ && !callback_->canceled()) { | |
460 | const auto answer = dynamic_cast<Answer *>(callback_->getDialer()); | |
461 | assert(answer); | |
462 | answer->conn = conn; | |
463 | answer->n_tries = n_tries; | |
464 | return answer; | |
465 | } | |
466 | return nullptr; | |
467 | } | |
468 | ||
469 | /// send a successful result to the initiator (if it still needs an answer) | |
470 | void | |
9b7992d9 | 471 | HappyConnOpener::sendSuccess(const PeerConnectionPointer &conn, const bool reused, const char *connKind) |
55622953 CT |
472 | { |
473 | debugs(17, 4, connKind << ": " << conn); | |
474 | if (auto *answer = futureAnswer(conn)) { | |
475 | answer->reused = reused; | |
476 | assert(!answer->error); | |
477 | ScheduleCallHere(callback_); | |
478 | } | |
479 | callback_ = nullptr; | |
480 | } | |
481 | ||
a4d576de EB |
/// cancels the in-progress attempt, making its path a future candidate
void
HappyConnOpener::cancelAttempt(Attempt &attempt, const char *reason)
{
    Must(attempt);
    destinations->reinstatePath(attempt.path); // before attempt.cancel() clears path
    attempt.cancel(reason);
}
490 | ||
55622953 CT |
/// inform the initiator about our failure to connect (if needed)
void
HappyConnOpener::sendFailure()
{
    debugs(17, 3, lastFailedConnection);
    if (auto *answer = futureAnswer(lastFailedConnection)) {
        if (!lastError)
            lastError = makeError(ERR_GATEWAY_FAILURE); // no attempt-specific error was recorded
        answer->error = lastError;
        assert(answer->error.valid());
        lastError = nullptr; // the answer owns it now
        ScheduleCallHere(callback_);
    }
    callback_ = nullptr;
}
506 | ||
/// reacts to a change in the shared destinations list
void
HappyConnOpener::noteCandidatesChange()
{
    destinations->notificationPending = false; // the pending notification has arrived
    checkForNewConnection();
}
513 | ||
/// starts opening (or reusing) a connection to the given destination
void
HappyConnOpener::startConnecting(Attempt &attempt, PeerConnectionPointer &dest)
{
    Must(!attempt.path);
    Must(!attempt.connector);
    Must(dest);

    // never reuse a persistent connection for SslBumped requests that go
    // through a cache_peer
    const auto bumpThroughPeer = cause->flags.sslBumped && dest->getPeer();
    const auto canReuseOld = allowPconn_ && !bumpThroughPeer;
    if (!canReuseOld || !reuseOldConnection(dest))
        openFreshConnection(attempt, dest);
}
527 | ||
/// reuses a persistent connection to the given destination (if possible)
/// \returns true if and only if reuse was possible
/// must be called via startConnecting()
bool
HappyConnOpener::reuseOldConnection(PeerConnectionPointer &dest)
{
    assert(allowPconn_);

    if (const auto pconn = fwdPconnPool->pop(dest, host_, retriable_)) {
        ++n_tries;
        dest.finalize(pconn); // attach the popped open connection to the path
        sendSuccess(dest, true, "reused connection");
        return true;
    }

    return false;
}
545 | ||
/// opens a fresh connection to the given destination
/// must be called via startConnecting()
void
HappyConnOpener::openFreshConnection(Attempt &attempt, PeerConnectionPointer &dest)
{
#if URL_CHECKSUM_DEBUG
    entry->mem_obj->checkUrlChecksum();
#endif

    GetMarkingsToServer(cause.getRaw(), *dest);

    // ConnOpener modifies its destination argument so we reset the source port
    // in case we are reusing the destination already used by our predecessor.
    dest->local.port(0);
    ++n_tries;

    typedef CommCbMemFunT<HappyConnOpener, CommConnectCbParams> Dialer;
    AsyncCall::Pointer callConnect = JobCallback(48, 5, Dialer, this, HappyConnOpener::connectDone);
    const time_t connTimeout = dest->connectTimeout(fwdStart);
    Comm::ConnOpener *cs = new Comm::ConnOpener(dest, callConnect, connTimeout);
    if (!dest->getPeer())
        cs->setHost(host_); // only non-peer (origin) destinations get the host name

    // remember the attempt details so that we can recognize and cancel it later
    attempt.path = dest;
    attempt.connector = callConnect;
    attempt.opener = cs;

    AsyncJob::Start(cs);
}
575 | ||
/// called by Comm::ConnOpener objects after a prime or spare connection attempt
/// completes (successfully or not)
void
HappyConnOpener::connectDone(const CommConnectCbParams &params)
{
    Must(params.conn);
    // identify which of our (at most two concurrent) attempts has finished
    const bool itWasPrime = (params.conn == prime.path);
    const bool itWasSpare = (params.conn == spare.path);
    Must(itWasPrime != itWasSpare);

    PeerConnectionPointer handledPath;
    if (itWasPrime) {
        handledPath = prime.path;
        prime.finish();
    } else {
        handledPath = spare.path;
        spare.finish();
        if (gotSpareAllowance) {
            // the allowance has now produced a result, successful or not
            TheSpareAllowanceGiver.jobUsedAllowance();
            gotSpareAllowance = false;
        }
    }

    const char *what = itWasPrime ? "new prime connection" : "new spare connection";
    if (params.flag == Comm::OK) {
        sendSuccess(handledPath, false, what);
        return;
    }

    /* this attempt failed */

    debugs(17, 8, what << " failed: " << params.conn);
    if (const auto peer = params.conn->getPeer())
        peerConnectFailed(peer); // updates peer selection bookkeeping (neighbors.h)
    params.conn->close(); // TODO: Comm::ConnOpener should do this instead.

    // remember the last failure (we forward it if we cannot connect anywhere)
    lastFailedConnection = handledPath;
    delete lastError;
    lastError = nullptr; // in case makeError() throws
    lastError = makeError(ERR_CONNECT_FAIL);
    lastError->xerrno = params.xerrno;

    if (spareWaiting)
        updateSpareWaitAfterPrimeFailure();

    checkForNewConnection();
}
622 | ||
/// reacts to a prime attempt failure
void
HappyConnOpener::updateSpareWaitAfterPrimeFailure()
{
    Must(currentPeer);
    Must(!prime);
    Must(spareWaiting);

    if (destinations->doneWithPrimes(*currentPeer)) {
        cancelSpareWait("all primes failed");
        ignoreSpareRestrictions = true; // failures lift spare gap/limit restrictions
        return; // checkForNewConnection() will open a spare connection ASAP
    }

    if (spareWaiting.toGivePrimeItsChance)
        stopGivingPrimeItsChance(); // this prime failure used up its chance

    // may still be spareWaiting.forSpareAllowance or
    // may still be spareWaiting.forPrimesToFail
}
643 | ||
644 | /// called when the prime attempt has used up its chance for a solo victory | |
645 | void | |
646 | HappyConnOpener::stopGivingPrimeItsChance() { | |
647 | Must(spareWaiting.toGivePrimeItsChance); | |
648 | spareWaiting.toGivePrimeItsChance = false; | |
649 | ThePrimeChanceGiver.dequeue(*this); | |
650 | } | |
651 | ||
/// called when the spare attempt should no longer obey spare connection limits
void
HappyConnOpener::stopWaitingForSpareAllowance() {
    Must(spareWaiting.forSpareAllowance);
    spareWaiting.forSpareAllowance = false;

    // a pending callback implies the allowance was already granted; return it
    if (spareWaiting.callback)
        TheSpareAllowanceGiver.jobDroppedAllowance();
    TheSpareAllowanceGiver.dequeue(*this); // clears spareWaiting.callback
}
662 | ||
/// stops waiting for the right conditions to open a spare connection
void
HappyConnOpener::cancelSpareWait(const char *reason)
{
    debugs(17, 5, "because " << reason);
    Must(spareWaiting);

    // only one of these wait reasons can be active at a time
    if (spareWaiting.toGivePrimeItsChance)
        stopGivingPrimeItsChance();
    else if (spareWaiting.forSpareAllowance)
        stopWaitingForSpareAllowance();

    spareWaiting.clear();
}
677 | ||
/** Called when an external event changes initiator interest, destinations,
 * prime, spare, or spareWaiting. Leaves HappyConnOpener in one of these
 * (mutually exclusive beyond the exceptional state #0) "stable" states:
 *
 * 0. Exceptional termination: done()
 * 1. Processing a single peer: currentPeer
 *    1.1. Connecting: prime || spare
 *    1.2. Waiting for spare gap and/or paths: !prime && !spare
 * 2. Waiting for a new peer: destinations->empty() && !destinations->destinationsFinalized && !currentPeer
 * 3. Finished: destinations->empty() && destinations->destinationsFinalized && !currentPeer
 */
void
HappyConnOpener::checkForNewConnection()
{
    debugs(17, 7, *destinations);

    // The order of the top-level if-statements below is important.

    if (done())
        return; // bail ASAP to minimize our waste and others delays (state #0)

    if (ranOutOfTimeOrAttempts()) {
        Must(currentPeer); // or we would be done() already
        return; // will continue working (state #1.1)
    }

    // update stale currentPeer and/or stale spareWaiting
    if (currentPeer && !spare && !prime && destinations->doneWithPeer(*currentPeer)) {
        debugs(17, 7, "done with peer; " << *currentPeer);
        if (spareWaiting.forNewPeer)
            cancelSpareWait("done with peer");
        else
            Must(!spareWaiting);

        currentPeer = nullptr;
        ignoreSpareRestrictions = false;
        Must(!gotSpareAllowance);
    } else if (currentPeer && !spareWaiting.forNewPeer && spareWaiting && destinations->doneWithSpares(*currentPeer)) {
        cancelSpareWait("no spares are coming");
        spareWaiting.forNewPeer = true;
    }

    // open a new prime and/or a new spare connection if needed
    if (!destinations->empty()) {
        if (!currentPeer) {
            auto newPrime = destinations->extractFront();
            currentPeer = newPrime;
            Must(currentPeer);
            debugs(17, 7, "new peer " << *currentPeer);
            primeStart = current_dtime;
            startConnecting(prime, newPrime);
            // TODO: if reuseOldConnection() in startConnecting() above succeeds,
            // then we should not get here, and Must(prime) below will fail.
            maybeGivePrimeItsChance();
            Must(prime); // entering state #1.1
        } else {
            if (!prime)
                maybeOpenAnotherPrimeConnection(); // may make destinations empty()
        }

        if (!spare && !spareWaiting)
            maybeOpenSpareConnection(); // may make destinations empty()

        Must(currentPeer);
    }

    if (currentPeer) {
        debugs(17, 7, "working on " << *currentPeer);
        return; // remaining in state #1.1 or #1.2
    }

    if (!destinations->destinationsFinalized) {
        debugs(17, 7, "waiting for more peers");
        return; // remaining in state #2
    }

    debugs(17, 7, "done; no more peers");
    Must(doneAll());
    // entering state #3
}
758 | ||
/// reacts to PrimeChanceGiver: the prime attempt has had its solo chance
void
HappyConnOpener::noteGavePrimeItsChance()
{
    Must(spareWaiting.toGivePrimeItsChance);
    spareWaiting.clear(); // no longer waiting; a spare may be opened now
    checkForNewConnection();
}
766 | ||
/// reacts to SpareAllowanceGiver granting us a spare connection allowance
void
HappyConnOpener::noteSpareAllowance()
{
    Must(spareWaiting.forSpareAllowance);
    spareWaiting.clear();

    if (ranOutOfTimeOrAttempts()) {
        TheSpareAllowanceGiver.jobDroppedAllowance(); // we cannot use the allowance
        return; // will quit or continue working on prime
    }

    Must(!gotSpareAllowance);
    gotSpareAllowance = true;

    auto dest = destinations->extractSpare(*currentPeer); // ought to succeed
    startConnecting(spare, dest);
}
784 | ||
785 | /// starts a prime connection attempt if possible or does nothing otherwise | |
786 | void | |
787 | HappyConnOpener::maybeOpenAnotherPrimeConnection() | |
788 | { | |
789 | Must(currentPeer); | |
790 | if (auto dest = destinations->extractPrime(*currentPeer)) | |
791 | startConnecting(prime, dest); | |
792 | // else wait for more prime paths or their exhaustion | |
793 | } | |
794 | ||
/// starts waiting for a spare permission (if spare connections may be possible)
/// or does nothing (otherwise)
void
HappyConnOpener::maybeGivePrimeItsChance()
{
    Must(currentPeer);
    Must(prime);
    Must(!spare);
    Must(!spareWaiting);

    if (destinations->doneWithSpares(*currentPeer)) {
        debugs(17, 7, "no spares for " << *currentPeer);
        spareWaiting.forNewPeer = true;
        return;
    }

    if (Config.happyEyeballs.connect_limit == 0) {
        debugs(17, 7, "concurrent spares are prohibited");
        spareWaiting.forPrimesToFail = true;
        return;
    }

    if (ThePrimeChanceGiver.readyNow(*this)) {
        debugs(17, 7, "no happy_eyeballs_connect_timeout");
        return; // spares may be opened immediately; no waiting needed
    }

    ThePrimeChanceGiver.enqueue(*this);
    spareWaiting.toGivePrimeItsChance = true;
    // wait for a prime connect result or noteGavePrimeItsChance()
}
826 | ||
/// if possible, starts a spare connection attempt
void
HappyConnOpener::maybeOpenSpareConnection()
{
    Must(currentPeer);
    Must(!spare);
    Must(!spareWaiting);
    Must(!gotSpareAllowance);

    if (ranOutOfTimeOrAttempts())
        return; // will quit or continue working on prime

    // jobGotInstantAllowance() call conditions below rely on the readyNow() check here
    if (!ignoreSpareRestrictions && // we have to honor spare restrictions
            !TheSpareAllowanceGiver.readyNow(*this) && // all new spares must wait
            destinations->haveSpare(*currentPeer)) { // and we do have a new spare
        TheSpareAllowanceGiver.enqueue(*this);
        spareWaiting.forSpareAllowance = true;
        return;
    }

    if (auto dest = destinations->extractSpare(*currentPeer)) {

        if (!ignoreSpareRestrictions) {
            // we are about to use an allowance that readyNow() above permitted
            TheSpareAllowanceGiver.jobGotInstantAllowance();
            gotSpareAllowance = true;
        }

        startConnecting(spare, dest);
        return;
    }

    // wait for more spare paths or their exhaustion
}
861 | ||
862 | /// Check for maximum connection tries and forwarding time restrictions | |
863 | bool | |
864 | HappyConnOpener::ranOutOfTimeOrAttempts() const | |
865 | { | |
866 | if (ranOutOfTimeOrAttemptsEarlier_) | |
867 | return true; | |
868 | ||
869 | if (n_tries >= Config.forward_max_tries) { | |
870 | debugs(17, 5, "maximum allowed tries exhausted"); | |
871 | ranOutOfTimeOrAttemptsEarlier_ = "maximum tries"; | |
872 | return true; | |
873 | } | |
874 | ||
875 | if (FwdState::ForwardTimeout(fwdStart) <= 0) { | |
876 | debugs(17, 5, "forwarding timeout"); | |
877 | ranOutOfTimeOrAttemptsEarlier_ = "forwarding timeout"; | |
878 | return true; | |
879 | } | |
880 | ||
881 | return false; | |
882 | } | |
883 | ||
a4d576de EB |
/// aborts the in-progress connection attempt (if any) and resets the attempt
void
HappyConnOpener::Attempt::cancel(const char *reason)
{
    if (connector) {
        connector->cancel(reason);
        // also tell the ConnOpener job to stop working on this connection
        CallJobHere(17, 3, opener, Comm::ConnOpener, noteAbort);
    }
    clear();
}
893 |