def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
- parent_process().join(timeout=5)
+ parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
import os, sys, time, unittest
import threading
-import test.support as support
+from test import support
LONGSLEEP = 2
self.threads.append(thread)
# busy-loop to wait for threads
- deadline = time.monotonic() + 10.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while len(self.alive) < NUM_THREADS:
time.sleep(0.1)
if deadline < time.monotonic():
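The hunk above is cut off at the deadline check. For context, a hedged sketch of the busy-loop idiom these forking/threading tests share (the function name, arguments, and failure handling are illustrative assumptions; the hardcoded 30.0 stands in for support.SHORT_TIMEOUT):

    import time

    def wait_for_threads(alive, num_threads, timeout=30.0):
        # Poll until every worker has registered itself in `alive`,
        # failing once the deadline passes.
        deadline = time.monotonic() + timeout
        while len(alive) < num_threads:
            time.sleep(0.1)
            if deadline < time.monotonic():
                raise AssertionError("timed out waiting for threads to start")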
import sys
import time
import unittest
+from test import support
class SIGUSR1Exception(Exception):
# (if set)
child.wait()
- timeout = 10.0
+ timeout = support.SHORT_TIMEOUT
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
- timeout=10)
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
- timeout=10)
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
return len(data)
write_transport.write(b'1')
- test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
+ test_utils.run_until(self.loop, lambda: reader(data) >= 1,
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
- timeout=10)
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
- test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
+ test_utils.run_until(self.loop, lambda: reader(data) >= 5,
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
- timeout=10)
+ timeout=support.SHORT_TIMEOUT)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
- asyncio.wait_for(client(srv.addr), timeout=10))
+ asyncio.wait_for(client(srv.addr),
+ timeout=support.SHORT_TIMEOUT))
# No garbage is left if SSL is closed uncleanly
client_context = weakref.ref(client_context)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
- asyncio.wait_for(client(srv.addr), timeout=10))
+ asyncio.wait_for(client(srv.addr),
+ timeout=support.SHORT_TIMEOUT))
# No garbage is left for SSL client from loop.create_connection, even
# if user stores the SSLTransport in corresponding protocol instance
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
- asyncio.wait_for(client(srv.addr), timeout=10))
+ asyncio.wait_for(client(srv.addr),
+ timeout=support.SHORT_TIMEOUT))
def test_start_tls_server_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
*addr,
ssl=client_sslctx,
server_hostname='',
- ssl_handshake_timeout=10.0),
+ ssl_handshake_timeout=support.SHORT_TIMEOUT),
0.5)
with self.tcp_server(server,
gen.close()
-def run_until(loop, pred, timeout=30):
+def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
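run_until is the asyncio test helper that spins the event loop until a predicate becomes true or the deadline expires. The hunk above is truncated, so the following is a hedged reconstruction of the complete pattern (the sleep interval and the exact exception raised are assumptions; 30.0 stands in for support.SHORT_TIMEOUT):

    import time
    import asyncio

    def run_until(loop, pred, timeout=30.0):
        # Poll pred() while letting the loop make progress; fail on deadline.
        deadline = time.monotonic() + timeout
        while not pred():
            if timeout is not None:
                timeout = deadline - time.monotonic()
                if timeout <= 0:
                    raise asyncio.TimeoutError()
            # Yield briefly so scheduled callbacks can run.
            loop.run_until_complete(asyncio.sleep(0.001))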
-import test.support
+from test import support
# Skip tests if _multiprocessing wasn't built.
-test.support.import_module('_multiprocessing')
+support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
-test.support.import_module('multiprocessing.synchronize')
+support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
class BaseTestCase(unittest.TestCase):
def setUp(self):
- self._thread_key = test.support.threading_setup()
+ self._thread_key = support.threading_setup()
def tearDown(self):
- test.support.reap_children()
- test.support.threading_cleanup(*self._thread_key)
+ support.reap_children()
+ support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
self.executor = None
dt = time.monotonic() - self.t1
- if test.support.verbose:
+ if support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
- @test.support.cpython_only
+ @support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
self.executor.submit(my_object.my_method)
del my_object
- collected = my_object_collected.wait(timeout=5.0)
+ collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
- with test.support.captured_stderr() as f1:
+ with support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
class ExecutorDeadlockTest:
- TIMEOUT = 15
+ TIMEOUT = support.SHORT_TIMEOUT
@classmethod
def _sleep_id(cls, x, delay):
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
- with test.support.captured_stderr():
+ with support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
- with test.support.captured_stderr() as stderr:
+ with support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
- with test.support.captured_stderr() as stderr:
+ with support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
t = threading.Thread(target=notification)
t.start()
- self.assertRaises(futures.CancelledError, f1.result, timeout=5)
+ self.assertRaises(futures.CancelledError,
+ f1.result, timeout=support.SHORT_TIMEOUT)
t.join()
def test_exception_with_timeout(self):
t = threading.Thread(target=notification)
t.start()
- self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
+ self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT),
+ OSError))
t.join()
def test_multiple_set_result(self):
def setUpModule():
global _threads_key
- _threads_key = test.support.threading_setup()
+ _threads_key = support.threading_setup()
def tearDownModule():
- test.support.threading_cleanup(*_threads_key)
- test.support.reap_children()
+ support.threading_cleanup(*_threads_key)
+ support.reap_children()
# cleanup multiprocessing
multiprocessing.process._cleanup()
# bpo-37421: Explicitly call _run_finalizers() to remove immediately
# temporary directories created by multiprocessing.util.get_temp_dir().
multiprocessing.util._run_finalizers()
- test.support.gc_collect()
+ support.gc_collect()
if __name__ == "__main__":
import unittest
from test.fork_wait import ForkWait
-from test.support import reap_children, get_attribute, verbose
+from test import support
# Skip test if fork does not exist.
-get_attribute(os, 'fork')
+support.get_attribute(os, 'fork')
class ForkTest(ForkWait):
def wait_impl(self, cpid):
- deadline = time.monotonic() + 10.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while time.monotonic() <= deadline:
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
if m == complete_module:
os._exit(0)
else:
- if verbose > 1:
+ if support.verbose > 1:
print("Child encountered partial module")
os._exit(1)
else:
imp.release_lock()
except RuntimeError:
if in_child:
- if verbose > 1:
+ if support.verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
def tearDownModule():
- reap_children()
+ support.reap_children()
if __name__ == "__main__":
unittest.main()
self.assertIn('0.0.0.0', serverthread.docserver.address)
starttime = time.monotonic()
- timeout = 1 #seconds
+ timeout = test.support.SHORT_TIMEOUT
while serverthread.serving:
time.sleep(.01)
from test import support
-TIMEOUT = 10
+TIMEOUT = support.SHORT_TIMEOUT
class Timer:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
- stdout, stderr = process.communicate(timeout=5.0)
+ stdout, stderr = process.communicate(timeout=support.SHORT_TIMEOUT)
except subprocess.TimeoutExpired:
process.kill()
return False
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
- deadline = time.monotonic() + 15.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
- deadline = time.monotonic() + 15.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# Remember real select() to avoid interferences with mocking
_real_select = select.select
-def receive(sock, n, timeout=20):
+def receive(sock, n, timeout=test.support.SHORT_TIMEOUT):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
- timeout = kwargs.get('timeout', 10)
+ timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
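The loop body is truncated above. A hedged sketch of how such a memory-BIO pump typically proceeds: retry func() until the SSL layer stops signalling WANT_READ/WANT_WRITE, moving bytes between the raw socket and the two ssl.MemoryBIO objects on each pass (the standalone function shape, buffer size, and TimeoutError are illustrative assumptions; 30.0 stands in for support.SHORT_TIMEOUT):

    import ssl
    import time

    def ssl_io_loop(sock, incoming, outgoing, func, *args, timeout=30.0):
        # incoming/outgoing are the ssl.MemoryBIO pair behind an SSLObject.
        deadline = time.monotonic() + timeout
        while True:
            if time.monotonic() > deadline:
                raise TimeoutError("SSL IO loop timed out")
            errno = None
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    raise
                errno = e.errno
            # Flush whatever the SSL layer queued, regardless of errors.
            buf = outgoing.read()
            if buf:
                sock.sendall(buf)
            if errno is None:
                return ret
            if errno == ssl.SSL_ERROR_WANT_READ:
                # Feed socket data into the incoming BIO, then retry func().
                buf = sock.recv(32768)
                if buf:
                    incoming.write(buf)
                else:
                    incoming.write_eof()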
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
- # Some heavily loaded buildbots (sparc Debian 3.x) require this much
- # time to start.
- self.assertEqual(p.wait(timeout=3), 0)
+ self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
- proc.wait(timeout=20)
+ proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
os._exit(0)
t0 = time.monotonic()
- deadline = time.monotonic() + 60.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
was_altered = support.environment_altered
try:
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
- worker_saw_exception.wait(timeout=10)
+ worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
- self.assertTrue(tstate_lock.acquire(timeout=5), False)
+ self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
import time
import unittest
from test.fork_wait import ForkWait
-from test.support import reap_children
+from test import support
if not hasattr(os, 'fork'):
raise unittest.SkipTest("os.fork not defined")
# This many iterations can be required, since some previously run
# tests (e.g. test_ctypes) could have spawned a lot of children
# very quickly.
- deadline = time.monotonic() + 10.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while time.monotonic() <= deadline:
# wait3() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
def tearDownModule():
- reap_children()
+ support.reap_children()
if __name__ == "__main__":
unittest.main()
import sys
import unittest
from test.fork_wait import ForkWait
-from test.support import reap_children, get_attribute
+from test import support
# If either of these do not exist, skip this test.
-get_attribute(os, 'fork')
-get_attribute(os, 'wait4')
+support.get_attribute(os, 'fork')
+support.get_attribute(os, 'wait4')
class Wait4Test(ForkWait):
# Issue #11185: wait4 is broken on AIX and will always return 0
# with WNOHANG.
option = 0
- deadline = time.monotonic() + 10.0
+ deadline = time.monotonic() + support.SHORT_TIMEOUT
while time.monotonic() <= deadline:
# wait4() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
self.assertTrue(rusage)
def tearDownModule():
- reap_children()
+ support.reap_children()
if __name__ == "__main__":
unittest.main()
Replace hardcoded timeout constants in tests with
-:data:`~test.support.LOOPBACK_TIMEOUT` of :mod:`test.support`, so it's easier
-to ajdust this timeout for all tests at once.
+new :mod:`test.support` constants: :data:`~test.support.LOOPBACK_TIMEOUT`,
+:data:`~test.support.INTERNET_TIMEOUT`, :data:`~test.support.SHORT_TIMEOUT` and
+:data:`~test.support.LONG_TIMEOUT`. This makes it easier to adjust these four
+timeout constants for all tests at once, rather than having to edit every
+test file individually.
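As a hedged illustration of the intent (the test class and worker thread below are hypothetical; only the constant comes from :mod:`test.support`), a test waiting on background work would now be written as:

    import threading
    import unittest
    from test import support

    class ExampleTimeoutTest(unittest.TestCase):
        def test_worker_finishes(self):
            finished = threading.Event()
            t = threading.Thread(target=finished.set)
            t.start()
            # SHORT_TIMEOUT (30 seconds by default) is deliberately generous:
            # the wait returns as soon as the event is set, so fast machines
            # pay nothing while loaded buildbots get enough slack.
            self.assertTrue(finished.wait(timeout=support.SHORT_TIMEOUT))
            t.join()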