AM_PROG_CC_C_O
dnl Before autoconf 2.70, AC_PROG_CC_C99 is supposedly necessary for some
-dnl compilers if you wan't C99 support. Starting with 2.70, it is obsolete and
+dnl compilers if you want C99 support. Starting with 2.70, it is obsolete and
dnl forbidden.
m4_version_prereq([2.70], [:], [AC_PROG_CC_C99])
try:
disp_prefix, url_prefix = ISSUE_PREFIX_MAP[prefix]
except KeyError:
- print("Can't figure out URL for {}{}".formt(prefix,bugno),
+ print("Can't figure out URL for {}{}".format(prefix,bugno),
file=sys.stderr)
return "{} {}{}".format(kind, prefix, bugno)
}
/**
- * Free a congestion control object and its asssociated state.
+ * Free a congestion control object and its associated state.
*/
void
congestion_control_free_(congestion_control_t *cc)
* Process a stream XOFF, parsing it, and then stopping reading on
* the edge connection.
*
- * Record that we have recieved an xoff, so we know not to resume
+ * Record that we have received an xoff, so we know not to resume
* reading on this edge conn until we get an XON.
*
- * Returns false if the XOFF did not validate; true if it does.
+ * Returns false if the XOFF does not validate; true if it does.
*
* To handle the case where the local orconn blocks, TOR_NOLA uses
-* the 'piecewise' BDP estimate, which uses more a conservative BDP
+* the 'piecewise' BDP estimate, which uses a more conservative BDP
-* estimate method when blocking occurrs, but a more aggressive BDP
+* estimate method when blocking occurs, but a more aggressive BDP
* estimate when there is no local blocking. This minimizes local
* client queues.
*/
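As a sketch of the 'piecewise' behaviour described above (names invented for illustration; the real estimator lives elsewhere in the congestion control code), the choice between the two estimates might look like this:

  #include <stdbool.h>
  #include <stdint.h>

  /* Illustrative only: pick the conservative BDP estimate while the
   * local orconn is blocked, and the more aggressive one otherwise,
   * so that local client queues stay small. */
  static uint64_t
  piecewise_bdp_sketch(bool orconn_blocked,
                       uint64_t conservative_bdp,
                       uint64_t aggressive_bdp)
  {
    return orconn_blocked ? conservative_bdp : aggressive_bdp;
  }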
CC_ALG_SENDME = 0,
/**
- * Prop#324 TOR_WESTWOOD - Deliberately agressive. Westwood may not even
+ * Prop#324 TOR_WESTWOOD - Deliberately aggressive. Westwood may not even
* converge to fairness in some cases because max RTT will also increase
- * on congesgtion, which boosts the Westwood RTT congestion threshhold. So it
+ * on congestion, which boosts the Westwood RTT congestion threshold. So it
* can cause runaway queue bloat, which may or may not lead to a robot
* uprising... Ok that's Westworld, not Westwood. Still, we need to test
- * Vegas and NOLA against something more agressive to ensure they do not
+ * Vegas and NOLA against something more aggressive to ensure they do not
* starve in the presence of cheaters. We also need to make sure cheaters
* trigger the oomkiller in those cases.
*/
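To make the runaway effect concrete with hypothetical numbers: suppose min RTT is 100 ms and the congestion threshold sits a third of the way toward max RTT. With max RTT at 200 ms the signal is near 133 ms; once congestion itself pushes max RTT up to 400 ms, the signal drifts out to about 200 ms, so RTTs that previously counted as congestion no longer do, and the queue keeps growing.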
/**
* Prop#324 TOR_VEGAS - TCP Vegas-style BDP tracker. Because Vegas backs off
- * whenever it detects queue delay, it can be beaten out by more agressive
+ * whenever it detects queue delay, it can be beaten out by more aggressive
* algs. However, in live network testing, it seems to do just fine against
* current SENDMEs. It outperforms Westwood and does not stall. */
CC_ALG_VEGAS = 2,
/**
* Prop#324: TOR_NOLA - NOLA looks the BDP right in the eye and uses it
* immediately as CWND. No slow start, no other congestion signals, no delay,
- * no bullshit. Like TOR_VEGAS, it also uses agressive BDP estimates, to
+ * no bullshit. Like TOR_VEGAS, it also uses aggressive BDP estimates, to
* avoid out-competition. It seems a bit better throughput than Vegas,
- * but its agressive BDP and rapid updates may lead to more queue latency. */
+ * but its aggressive BDP and rapid updates may lead to more queue latency. */
CC_ALG_NOLA = 3,
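A minimal sketch of the contrast drawn in the comments above, with invented helper names (the real update logic lives in the algorithm-specific files): NOLA uses the BDP estimate directly as the congestion window, while a Vegas-style update backs the window off whenever queue delay is inferred.

  #include <stdbool.h>
  #include <stdint.h>

  /* Sketch only: NOLA-style update copies the BDP estimate straight
   * into the congestion window. */
  static uint64_t
  nola_style_cwnd_sketch(uint64_t bdp_estimate)
  {
    return bdp_estimate;
  }

  /* Sketch only: a Vegas-style update backs off by 'step' when queue
   * delay is detected, and otherwise grows toward the BDP estimate. */
  static uint64_t
  vegas_style_cwnd_sketch(uint64_t cwnd, uint64_t bdp_estimate,
                          bool queue_delay_detected, uint64_t step)
  {
    if (queue_delay_detected)
      return cwnd > step ? cwnd - step : step;
    else
      return cwnd < bdp_estimate ? cwnd + step : cwnd;
  }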
/**
* For steady-state: the number of sendme acks until we will acknowledge
* a congestion event again. It starts out as the number of sendme acks
- * in a congestion windowm and is decremented each ack. When this reaches
+ * in a congestion window and is decremented each ack. When this reaches
* 0, it means we should examine our congestion algorithm conditions.
* In this way, we only react to one congestion event per congestion window.
*
bdp_alg_t bdp_alg;
/** Algorithm-specific parameters. The specific struct that is used
- * depends upon the algoritghm selected by the cc_alg parameter.
+ * depends upon the algorithm selected by the cc_alg parameter.
* These should not be accessed anywhere other than the algorithm-specific
* files. */
union {
};
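The union above follows the usual tagged-union pattern: which member is live is implied by cc_alg, so only the matching algorithm's file should touch it. A generic, entirely hypothetical illustration of that pattern:

  #include <stdint.h>

  /* Hypothetical names throughout; this is not the real structure. */
  typedef enum { EX_ALG_A, EX_ALG_B } ex_alg_t;

  typedef struct {
    ex_alg_t alg;                    /* plays the role of cc_alg */
    union {
      struct { uint64_t thresh; } a; /* read only when alg == EX_ALG_A */
      struct { uint64_t gamma; } b;  /* read only when alg == EX_ALG_B */
    } u;
  } ex_cc_t;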
/**
- * Returns the number of sendme acks we will recieve before we update cwnd.
+ * Returns the number of sendme acks we will receive before we update cwnd.
*
* Congestion control literature recommends only one update of cwnd per
* cwnd worth of acks. However, we can also tune this to be more frequent
}
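A hedged sketch of the rate described above, assuming for illustration that both the window and the SENDME spacing are measured in cells and that a tuning factor of 1 means one cwnd update per cwnd worth of acks:

  #include <stdint.h>

  /* Sketch only: number of SENDME acks to wait between cwnd updates.
   * With updates_per_cwnd == 1 this is one update per cwnd worth of
   * acks; larger values make updates more frequent. */
  static uint64_t
  cwnd_update_rate_sketch(uint64_t cwnd, uint64_t sendme_spacing,
                          uint64_t updates_per_cwnd)
  {
    uint64_t acks_per_cwnd = cwnd / sendme_spacing;
    uint64_t rate = acks_per_cwnd / updates_per_cwnd;
    return rate ? rate : 1; /* always wait at least one ack */
  }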
/**
- * Return the RTT threshhold that signals congestion.
+ * Return the RTT threshold that signals congestion.
*
* Computed from the threshold parameter that specifies a
- * percent between the min and max RTT obseved so far.
+ * percent between the min and max RTT observed so far.
*/
static inline uint64_t
westwood_rtt_signal(const congestion_control_t *cc)
westwood_rtt_signal(cc) - cc->min_rtt_usec < USEC_ONE_MS)
return false;
- /* If the EWMA-smoothed RTT exceeds the westwood RTT threshhold,
+ /* If the EWMA-smoothed RTT exceeds the westwood RTT threshold,
* then it is congestion. */
if (cc->ewma_rtt_usec > westwood_rtt_signal(cc))
return true;
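Putting the two comments above together as a sketch (the percentage parameter and its handling are assumptions, not the actual code): the signal sits a configured fraction of the way from the minimum to the maximum RTT observed, and congestion is declared once the EWMA-smoothed RTT crosses it.

  #include <stdbool.h>
  #include <stdint.h>

  /* Sketch only: a threshold 'pct' percent of the way from the minimum
   * RTT to the maximum RTT observed so far. */
  static uint64_t
  westwood_rtt_signal_sketch(uint64_t min_rtt_usec, uint64_t max_rtt_usec,
                             uint64_t pct)
  {
    return min_rtt_usec + (pct * (max_rtt_usec - min_rtt_usec)) / 100;
  }

  /* Sketch only: congestion is signaled when the EWMA-smoothed RTT
   * exceeds that threshold. */
  static bool
  westwood_is_congested_sketch(uint64_t ewma_rtt_usec, uint64_t min_rtt_usec,
                               uint64_t max_rtt_usec, uint64_t pct)
  {
    return ewma_rtt_usec >
           westwood_rtt_signal_sketch(min_rtt_usec, max_rtt_usec, pct);
  }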
* Process a SENDME and update the congestion window according to the
* rules specified in TOR_WESTWOOD of Proposal #324.
*
- * Essentially, this algorithm uses a threshhold of 'rtt_thresh', which
+ * Essentially, this algorithm uses a threshold of 'rtt_thresh', which
* is a midpoint between the min and max RTT. If the RTT exceeds this
- * threshhold, then queue delay due to congestion is assumed to be present,
- * and the algirithm reduces the congestion window. If the RTT is below the
- * threshhold, the circuit is not congested (ie: queue delay is low), and we
+ * threshold, then queue delay due to congestion is assumed to be present,
+ * and the algorithm reduces the congestion window. If the RTT is below the
+ * threshold, the circuit is not congested (ie: queue delay is low), and we
* increase the congestion window.
*
* The congestion window is updated only once every congestion window worth of
/* Assess connect counter. Mark it if counter is down to 0 and we haven't
- * marked it before or it was reset. This is to avoid to re-mark it over and
- * over again extending continously the blocked time. */
+ * marked it before or it was reset. This is to avoid re-marking it over and
+ * over again, continuously extending the blocked time. */
if (token_bucket_ctr_get(&stats->connect_count) == 0 &&
stats->marked_until_ts == 0) {
conn_mark_client(stats);
/**
* The following fields are used to count the total bytes sent on this
- * stream, and compare them to the number of XON and XOFFs recieved, so
+ * stream, and compare them to the number of XON and XOFFs received, so
* that clients can check rate limits of XOFF/XON to prevent dropmark
* attacks. */
uint32_t total_bytes_xmit;
int data_pending;
/**
- * Monotime timestamp of when the other end should have successfuly
+ * Monotime timestamp of when the other end should have successfully
* shut down the stream and stop sending data, based on the larger
* of circuit RTT and CBT. Used if 'used_ccontrol' is true, to expire
* the half_edge at this monotime timestamp. */
* Flow Control
*/
-/* Emitted everytime the flow_control_decide_xon() function is called. */
+/* Emitted every time the flow_control_decide_xon() function is called. */
TRACEPOINT_EVENT(tor_cc, flow_decide_xon,
TP_ARGS(const edge_connection_t *, stream, size_t, n_written),
TP_FIELDS(
}
} else {
/*
- * We shouldn't see this, but what the hell, NULLs precede everythin
+ * We shouldn't see this, but what the hell, NULLs precede everything
* else
*/
return 1;
}
/**
- * Create a new signing key and (optionally) certficiate; do not read or write
+ * Create a new signing key and (optionally) certificate; do not read or write
* from disk. See ed_key_init_from_file() for more information.
*/
ed25519_keypair_t *
/** Return true iff port p1 is equal to p2.
*
- * This does a field by field comparaison. */
+ * This does a field by field comparison. */
static bool
port_cfg_eq(const port_cfg_t *p1, const port_cfg_t *p2)
{
tor_assert(fmt);
STRMAP_FOREACH(store->entries, key, const smartlist_t *, entries) {
- /* Indicate that we've formatted the coment already for the entries. */
+ /* Indicate that we've formatted the comment already for the entries. */
bool comment_formatted = false;
SMARTLIST_FOREACH_BEGIN(entries, const metrics_store_entry_t *, entry) {
fmt(entry, data, comment_formatted);
{
// recursive SRW locks are not supported because they need extra logic for
// acquiring and releasing but SleepConditionVariableSRW will use the OS
- // lock relase function which lacks our extra logic
+ // lock release function which lacks our extra logic
tor_assert(lock_->type == NON_RECURSIVE);
SRWLOCK *lock = &lock_->mutex;
DWORD ms = INFINITE;