A recent rerun of the profiling suite added more profiling data that is
failing over small differences. A 15% variance is acceptable for these
tests, which are looking for thousands of encode calls.
Change-Id: I33dac346b2ff07f86b4bc278a7309ca9b7efbaab
def teardown(self):
metadata.drop_all()
- @profiling.function_call_count()
+ @profiling.function_call_count(variance=0.15)
def test_string(self):
with testing.db.connect().execution_options(
compiled_cache=None
) as conn:
[tuple(row) for row in conn.execute(t.select()).fetchall()]
- @profiling.function_call_count()
+ @profiling.function_call_count(variance=0.15)
def test_unicode(self):
with testing.db.connect().execution_options(
compiled_cache=None