# off-by-1 error too low. So we add 2 instead of 1 if chopping lost
# a fraction > 0.9.
- # The "WASI" test platfrom can complain about `len(s)` if it's too
+ # The "WASI" test platform can complain about `len(s)` if it's too
# large to fit in its idea of "an index-sized integer".
lenS = s.__len__()
log_ub = lenS * _LOG_10_BASE_256
# ctx.prec = max(n.adjusted() - p256.adjusted(), 0) + GUARD
# hi = +n * +recip # unary `+` chops to ctx.prec digits
#
-# we have 3 visible chopped operationa, but there's also a 4th:
+# we have 3 visible chopped operations, but there's also a 4th:
# precomputing a truncated `recip` as part of setup.
#
# So the computed product is exactly equal to the true product times
# Enable for brute-force testing of compute_powers(). This takes about a
# minute, because it tries millions of cases.
if 0:
- def consumer(w, limir, need_hi):
+ def consumer(w, limit, need_hi):
seen = set()
need = set()
def inner(w):
inner(lo)
inner(hi)
inner(w)
- exp = compute_powers(w, 1, limir, need_hi=need_hi)
+ exp = compute_powers(w, 1, limit, need_hi=need_hi)
assert exp.keys() == need
from itertools import chain
#          D E F                     B E
#          G                         C F
#
- # "fill" the table with empty words, so we always have the same amout
+ # "fill" the table with empty words, so we always have the same amount
# of rows for each column
missing = cols*rows - len(wordlist)
wordlist = wordlist + ['']*missing
try:
poll: type[select.poll] = select.poll
except AttributeError:
- # this is exactly the minumum necessary to support what we
+ # this is exactly the minimum necessary to support what we
# do with poll objects
class MinimalPoll:
def __init__(self):
# reuse the oldline as much as possible, but stop as soon as we
# encounter an ESCAPE, because it might be the start of an escape
- # sequene
+ # sequence
while (
x_coord < minlen
and oldline[x_pos] == newline[x_pos]
# reuse the oldline as much as possible, but stop as soon as we
# encounter an ESCAPE, because it might be the start of an escape
- # sequene
+ # sequence
while (
x_coord < minlen
and oldline[x_pos] == newline[x_pos]
if kw_only_fields:
# Add the keyword-only args. Because the * can only be added if
# there's at least one keyword-only arg, there needs to be a test here
- # (instead of just concatenting the lists together).
+ # (instead of just concatenating the lists together).
_init_params += ['*']
_init_params += [_init_param(f) for f in kw_only_fields]
func_builder.add_fn('__init__',
self._run_tests_mp(runtests, self.num_workers)
else:
# gh-117783: don't immortalize deferred objects when tracking
- # refleaks. Only releveant for the free-threaded build.
+ # refleaks. Only relevant for the free-threaded build.
with suppress_immortalization(runtests.hunt_refleak):
self.run_tests_sequentially(runtests)
pgo = runtests.pgo
try:
# gh-117783: don't immortalize deferred objects when tracking
- # refleaks. Only releveant for the free-threaded build.
+ # refleaks. Only relevant for the free-threaded build.
with support.suppress_immortalization(runtests.hunt_refleak):
_runtest(result, runtests)
except:
# TODO: This module was deprecated and removed from CPython 3.12
-# Now it is a test-only helper. Any attempts to rewrite exising tests that
+# Now it is a test-only helper. Any attempts to rewrite existing tests that
# are using this module and remove it completely are appreciated!
# See: https://github.com/python/cpython/issues/72719
# TODO: This module was deprecated and removed from CPython 3.12
-# Now it is a test-only helper. Any attempts to rewrite exising tests that
+# Now it is a test-only helper. Any attempts to rewrite existing tests that
# are using this module and remove it completely are appreciated!
# See: https://github.com/python/cpython/issues/72719
def assertInstructionsMatch(self, actual_seq, expected):
# get an InstructionSequence and an expected list, where each
- # entry is a label or an instruction tuple. Construct an expcted
+ # entry is a label or an instruction tuple. Construct an expected
# instruction sequence and compare with the one given.
self.assertIsInstance(expected, list)
class NodeTransformerTests(ASTTestMixin, unittest.TestCase):
- def assertASTTransformation(self, tranformer_class,
+ def assertASTTransformation(self, transformer_class,
initial_code, expected_code):
initial_ast = ast.parse(dedent(initial_code))
expected_ast = ast.parse(dedent(expected_code))
- tranformer = tranformer_class()
- result_ast = ast.fix_missing_locations(tranformer.visit(initial_ast))
+ transformer = transformer_class()
+ result_ast = ast.fix_missing_locations(transformer.visit(initial_ast))
self.assertASTEqual(result_ast, expected_ast)
self.assertEqual([2, 3], result)
async def test_acquire_fifo_order_4(self):
- # Test that a successfule `acquire()` will wake up multiple Tasks
+ # Test that a successful `acquire()` will wake up multiple Tasks
# that were waiting in the Semaphore queue due to FIFO rules.
sem = asyncio.Semaphore(0)
result = []
count = 0
async def c1(result):
- # First task immediatlly waits for semaphore. It will be awoken by c2.
+ # First task immediately waits for semaphore. It will be awoken by c2.
self.assertEqual(sem._value, 0)
await sem.acquire()
# We should have woken up all waiting tasks now.
# first time waiting
await barrier.wait()
- # after wainting once for all tasks
+ # after waiting once for all tasks
if rewait_n > 0:
rewait_n -= 1
# wait again only for rewait tasks
def test_subprocess_protocol_events(self):
# gh-108973: Test that all subprocess protocol methods are called.
- # The protocol methods are not called in a determistic order.
+ # The protocol methods are not called in a deterministic order.
# The order depends on the event loop and the operating system.
events = []
fds = [1, 2]
self.assertTrue(exe.is_valid())
# Assert that the correct executors are invalidated
# and check that nothing crashes when we invalidate
- # an executor mutliple times.
+ # an executor multiple times.
for i in (4,3,2,1,0):
_testinternalcapi.invalidate_executors(objects[i])
for exe in executors[i:]:
executor_manager.join()
def test_crash_big_data(self):
- # Test that there is a clean exception instad of a deadlock when a
+ # Test that there is a clean exception instead of a deadlock when a
# child process crashes while some data is being written into the
# queue.
# https://github.com/python/cpython/issues/94777
parser.get_msg_id("<simplelocal@")
def test_get_msg_id_with_brackets(self):
- # Microsof Outlook generates non-standard one-off addresses:
+ # Microsoft Outlook generates non-standard one-off addresses:
# https://learn.microsoft.com/en-us/office/client-developer/outlook/mapi/one-off-addresses
with self.assertRaises(errors.HeaderParseError):
parser.get_msg_id("<[abrakadabra@microsoft.com]>")
def test_double_const(self):
# Importing double_const checks that float constants
- # serialiazed by marshal as PYC files don't lose precision
+ # serialized by marshal as PYC files don't lose precision
# (SF bug 422177).
from test.test_import.data import double_const
unload('test.test_import.data.double_const')
# * alive in 1 interpreter (main)
# * module def still in _PyRuntime.imports.extensions
# * mod init func ran again
- # * m_copy is NULL (claered when the interpreter was destroyed)
+ # * m_copy is NULL (cleared when the interpreter was destroyed)
# (was from main interpreter)
# * module's global state was updated, not reset
# * alive in 0 interpreters
# * module def in _PyRuntime.imports.extensions
# * mod init func ran for the first time (since reset, at least)
- # * m_copy is NULL (claered when the interpreter was destroyed)
+ # * m_copy is NULL (cleared when the interpreter was destroyed)
# * module's global state was initialized, not reset
# Use a subinterpreter that sticks around.
name is the name of the document element type, public_id the
public identifier of the DTD (or None if none were supplied)
- and system_id the system identfier of the external subset (or
+ and system_id the system identifier of the external subset (or
None if none were supplied)."""
def endDTD(self):