help="Drop all tables in the target database first")
make_option("--backend-only", action="store_true", dest="backend_only",
help="Run only tests marked with __backend__")
+ make_option("--nomemory", action="store_true", dest="nomemory",
+ help="Don't run memory profiling tests")
make_option("--low-connections", action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - "
options = opt
+@pre
+def _set_nomemory(opt, file_config):
+ if opt.nomemory:
+ exclude_tags.add("memory_intensive")
+
+
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import weakref
import itertools
+import multiprocessing
class A(fixtures.ComparableEntity):
pass
else:
return gc.get_objects()
- def profile(*args):
+ def profile(queue, func_args):
+ # give testing.db a brand new pool and don't
+ # touch the existing pool, since closing a socket
+ # in the subprocess can affect the parent
+ testing.db.pool = testing.db.pool.recreate()
+
gc_collect()
samples = []
max_ = 0
if until_maxtimes >= maxtimes // 5:
break
for x in range(5):
- func(*args)
+ func(*func_args)
gc_collect()
samples.append(
get_num_objects() if get_num_objects is not None
else len(get_objects_skipping_sqlite_issue())
)
- # note: this prints lots of text, and when using pytest-xdist,
- # actually interferes with memory itself from just sending
- # the stdout between processes :).
- # need to figure out a "condiional print" that doesn't send
- # any stdout when we have pytest-xdist happening
- # print("sample gc sizes:", samples)
-
if assert_no_sessions:
assert len(_sessions) == 0
+ # queue.put(('samples', samples))
+
latest_max = max(samples[-5:])
if latest_max > max_:
- print(
- "Max grew from %s to %s, max has "
- "grown for %s samples" % (
- max_, latest_max, max_grew_for
+ queue.put(
+ (
+ 'status',
+ "Max grew from %s to %s, max has "
+ "grown for %s samples" % (
+ max_, latest_max, max_grew_for
+ )
)
)
max_ = latest_max
until_maxtimes += 1
continue
else:
- print("Max remained at %s, %s more attempts left" %
- (max_, max_grew_for))
+ queue.put(
+ (
+ 'status',
+ "Max remained at %s, %s more attempts left" %
+ (max_, max_grew_for)
+ )
+ )
max_grew_for -= 1
if max_grew_for == 0:
success = True
break
if not success:
- assert False, \
- "Ran for a total of %d times, memory kept growing: %r" % (
- maxtimes,
- samples
+ queue.put(
+ (
+ 'result',
+ False,
+ "Ran for a total of %d times, memory kept "
+ "growing: %r" % (
+ maxtimes,
+ samples
+ )
)
+ )
+
+ else:
+ queue.put(
+ ('result', True, 'success')
+ )
+
+ def run_in_process(*func_args):
+ queue = multiprocessing.Queue()
+ proc = multiprocessing.Process(
+ target=profile, args=(queue, func_args))
+ proc.start()
+ while True:
+ row = queue.get()
+ typ = row[0]
+ if typ == 'samples':
+ print("sample gc sizes:", row[1])
+ elif typ == 'status':
+ print(row[1])
+ elif typ == 'result':
+ break
+ else:
+ assert False, "can't parse row"
+ proc.join()
+ assert row[1], row[2]
+
+ return run_in_process
- assert success
- return profile
return decorate
assert not eng.dialect._type_memos
+ @testing.fails()
+ def test_fixture_failure(self):
+ class Foo(object):
+ pass
+ stuff = []
+
+ @profile_memory(maxtimes=20)
+ def go():
+ stuff.extend(
+ Foo() for i in range(100)
+ )
+ go()
+
class MemUsageWBackendTest(EnsureZeroed):
target_strings = session.connection().\
dialect.identifier_preparer._strings
- with session.transaction:
- @profile_memory(
- assert_no_sessions=False,
- get_num_objects=lambda: len(target_strings))
- def go():
+ session.close()
+
+ @profile_memory(
+ assert_no_sessions=False,
+ get_num_objects=lambda: len(target_strings)
+ )
+ def go():
+ session = Session(testing.db)
+ with session.transaction:
sc = SomeClass()
session.add(sc)
-
with session.begin_nested():
session.query(SomeClass).first()
- go()
+ go()
@testing.crashes('mysql+cymysql', 'blocking')
def test_unicode_warnings(self):
WORKERS={env:WORKERS:-n4}
oracle: WORKERS={env:WORKERS:-n2}
nocext: DISABLE_SQLALCHEMY_CEXT=1
- nomemory: NOMEMORY=--exclude-tag memory-intensive
+ nomemory: NOMEMORY=--nomemory
cov: COVERAGE={[testenv]cov_args}
sqlite: SQLITE={env:SQLITE:--db sqlite}
postgresql: POSTGRESQL={env:POSTGRESQL:--db postgresql}
mysql: MYSQL={env:MYSQL:--db mysql --db pymysql}
- oracle: ORACLE={env:ORACLE:--db oracle} --write-idents oracle_idents.txt --exclude-tag memory-intensive
+ oracle: ORACLE={env:ORACLE:--db oracle} --write-idents oracle_idents.txt --nomemory
mssql: MSSQL={env:MSSQL:--db pyodbc --db pymssql}
backendonly: BACKENDONLY=--backend-only
# for nocext, we rm *.so in lib in case we are doing usedevelop=True
commands=
{nocext}: sh -c "rm -f lib/sqlalchemy/*.so"
{env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:NOMEMORY:} {env:COVERAGE:} {posargs}
{oracle}: python reap_oracle_dbs.py oracle_idents.txt