git.ipfire.org Git - thirdparty/psycopg.git/commitdiff
test(pool): add tests to verify check on getconn's behaviour
author: Daniele Varrazzo <daniele.varrazzo@gmail.com>
Fri, 6 Oct 2023 16:31:52 +0000 (18:31 +0200)
committer: Daniele Varrazzo <daniele.varrazzo@gmail.com>
Sat, 14 Oct 2023 07:45:36 +0000 (09:45 +0200)
tests/pool/test_pool.py
tests/pool/test_pool_async.py

index fd3d656b3253470cd1606adc7ef3f90f522e5df0..f959203e349734b5df7a2480d8ebaa94323c50d3 100644 (file)
@@ -726,6 +726,116 @@ def test_check_idle(dsn):
             assert conn.info.transaction_status == TransactionStatus.IDLE
 
 
+@pytest.mark.crdb_skip("pg_terminate_backend")
+def test_connect_no_check(dsn):
+    with pool.ConnectionPool(dsn, min_size=2) as p:
+        p.wait(1.0)
+        with p.connection() as conn:
+            with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        with pytest.raises(psycopg.OperationalError):
+            with p.connection() as conn:
+                conn.execute("select 1")
+                with p.connection() as conn2:
+                    conn2.execute("select 2")
+
+
+@pytest.mark.crdb_skip("pg_terminate_backend")
+@pytest.mark.parametrize("autocommit", [True, False])
+def test_connect_check(dsn, caplog, autocommit):
+    caplog.set_level(logging.WARNING, logger="psycopg.pool")
+
+    with pool.ConnectionPool(
+        dsn,
+        min_size=2,
+        kwargs={"autocommit": autocommit},
+        check=pool.ConnectionPool.check_connection,
+    ) as p:
+        p.wait(1.0)
+        with p.connection() as conn:
+            pid1 = conn.info.backend_pid
+            with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        with p.connection() as conn:
+            assert conn.info.transaction_status == TransactionStatus.IDLE
+            conn.execute("select 1")
+            with p.connection() as conn2:
+                assert conn2.info.transaction_status == TransactionStatus.IDLE
+                conn2.execute("select 2")
+
+                pids = {c.info.backend_pid for c in [conn, conn2]}
+
+    assert pid1 in pids
+    assert pid2 not in pids
+    assert not caplog.records
+
+
+@pytest.mark.parametrize("autocommit", [True, False])
+@pytest.mark.crdb_skip("pg_terminate_backend")
+def test_getconn_check(dsn, caplog, autocommit):
+    caplog.set_level(logging.WARNING, logger="psycopg.pool")
+
+    with pool.ConnectionPool(
+        dsn,
+        kwargs={"autocommit": autocommit},
+        min_size=2,
+        check=pool.ConnectionPool.check_connection,
+    ) as p:
+        p.wait(1.0)
+        with p.connection() as conn:
+            pid1 = conn.info.backend_pid
+            with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        conn = p.getconn()
+        try:
+            assert conn.info.transaction_status == TransactionStatus.IDLE
+            conn.execute("select 1")
+            conn.rollback()
+            conn2 = p.getconn()
+            try:
+                assert conn2.info.transaction_status == TransactionStatus.IDLE
+                conn2.execute("select 1")
+                conn2.rollback()
+                pids = {c.info.backend_pid for c in [conn, conn2]}
+            finally:
+                p.putconn(conn2)
+        finally:
+            p.putconn(conn)
+
+    assert pid1 in pids
+    assert pid2 not in pids
+    assert not caplog.records
+
+
+@pytest.mark.slow
+def test_connect_check_timeout(dsn, proxy):
+    proxy.start()
+    with pool.ConnectionPool(
+        proxy.client_dsn,
+        min_size=1,
+        timeout=1.0,
+        check=pool.ConnectionPool.check_connection,
+    ) as p:
+        p.wait()
+
+        proxy.stop()
+        t0 = time()
+        with pytest.raises(pool.PoolTimeout):
+            with p.connection():
+                pass
+        assert 1.0 <= time() - t0 <= 1.1
+
+        proxy.start()
+        with p.connection(timeout=10) as conn:
+            conn.execute("select 1")
+
+
 @pytest.mark.slow
 def test_check_max_lifetime(dsn):
     with pool.ConnectionPool(dsn, min_size=1, max_lifetime=0.2) as p:
@@ -740,7 +850,7 @@ def test_check_max_lifetime(dsn):
 
 
 @pytest.mark.slow
-def test_stats_connect(dsn, proxy, monkeypatch):
+def test_stats_connect(proxy, monkeypatch):
     proxy.start()
     delay_connection(monkeypatch, 0.2)
     with pool.ConnectionPool(proxy.client_dsn, min_size=3) as p:
@@ -760,6 +870,25 @@ def test_stats_connect(dsn, proxy, monkeypatch):
         assert stats["connections_lost"] == 3
 
 
+@pytest.mark.crdb_skip("pg_terminate_backend")
+def test_stats_check(dsn):
+    with pool.ConnectionPool(
+        dsn, min_size=1, check=pool.ConnectionPool.check_connection
+    ) as p:
+        p.wait()
+        with p.connection() as conn:
+            pid = conn.info.backend_pid
+
+        with psycopg.Connection.connect(dsn) as conn:
+            conn.execute("select pg_terminate_backend(%s)", [pid])
+
+        with p.connection() as conn:
+            assert conn.info.backend_pid != pid
+
+        stats = p.get_stats()
+        assert stats["connections_lost"] == 1
+
+
 @pytest.mark.slow
 def test_spike(dsn, monkeypatch):
     # Inspired to https://github.com/brettwooldridge/HikariCP/blob/dev/
index 195fec8186d211ccda53c1cca4765fc12a18ad5d..126abddb90ee4e935dc0ee594e3fc9fdd0e200ea 100644 (file)
@@ -732,6 +732,116 @@ async def test_check_idle(dsn):
             assert conn.info.transaction_status == TransactionStatus.IDLE
 
 
+@pytest.mark.crdb_skip("pg_terminate_backend")
+async def test_connect_no_check(dsn):
+    async with pool.AsyncConnectionPool(dsn, min_size=2) as p:
+        await p.wait(1.0)
+        async with p.connection() as conn:
+            async with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            await conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        with pytest.raises(psycopg.OperationalError):
+            async with p.connection() as conn:
+                await conn.execute("select 1")
+                async with p.connection() as conn2:
+                    await conn2.execute("select 2")
+
+
+@pytest.mark.crdb_skip("pg_terminate_backend")
+@pytest.mark.parametrize("autocommit", [True, False])
+async def test_connect_check(dsn, caplog, autocommit):
+    caplog.set_level(logging.WARNING, logger="psycopg.pool")
+
+    async with pool.AsyncConnectionPool(
+        dsn,
+        min_size=2,
+        kwargs={"autocommit": autocommit},
+        check=pool.AsyncConnectionPool.check_connection,
+    ) as p:
+        await p.wait(1.0)
+        async with p.connection() as conn:
+            pid1 = conn.info.backend_pid
+            async with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            await conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        async with p.connection() as conn:
+            assert conn.info.transaction_status == TransactionStatus.IDLE
+            await conn.execute("select 1")
+            async with p.connection() as conn2:
+                assert conn2.info.transaction_status == TransactionStatus.IDLE
+                await conn2.execute("select 2")
+
+                pids = {c.info.backend_pid for c in [conn, conn2]}
+
+    assert pid1 in pids
+    assert pid2 not in pids
+    assert not caplog.records
+
+
+@pytest.mark.parametrize("autocommit", [True, False])
+@pytest.mark.crdb_skip("pg_terminate_backend")
+async def test_getconn_check(dsn, caplog, autocommit):
+    caplog.set_level(logging.WARNING, logger="psycopg.pool")
+
+    async with pool.AsyncConnectionPool(
+        dsn,
+        kwargs={"autocommit": autocommit},
+        min_size=2,
+        check=pool.AsyncConnectionPool.check_connection,
+    ) as p:
+        await p.wait(1.0)
+        async with p.connection() as conn:
+            pid1 = conn.info.backend_pid
+            async with p.connection() as conn2:
+                pid2 = conn2.info.backend_pid
+            await conn.execute("select pg_terminate_backend(%s)", [pid2])
+
+        conn = await p.getconn()
+        try:
+            assert conn.info.transaction_status == TransactionStatus.IDLE
+            await conn.execute("select 1")
+            await conn.rollback()
+            conn2 = await p.getconn()
+            try:
+                assert conn2.info.transaction_status == TransactionStatus.IDLE
+                await conn2.execute("select 1")
+                await conn2.rollback()
+                pids = {c.info.backend_pid for c in [conn, conn2]}
+            finally:
+                await p.putconn(conn2)
+        finally:
+            await p.putconn(conn)
+
+    assert pid1 in pids
+    assert pid2 not in pids
+    assert not caplog.records
+
+
+@pytest.mark.slow
+async def test_connect_check_timeout(dsn, proxy):
+    proxy.start()
+    async with pool.AsyncConnectionPool(
+        proxy.client_dsn,
+        min_size=1,
+        timeout=1.0,
+        check=pool.AsyncConnectionPool.check_connection,
+    ) as p:
+        await p.wait()
+
+        proxy.stop()
+        t0 = time()
+        with pytest.raises(pool.PoolTimeout):
+            async with p.connection():
+                pass
+        assert 1.0 <= (time() - t0) <= 1.1
+
+        proxy.start()
+        async with p.connection(timeout=10) as conn:
+            await conn.execute("select 1")
+
+
 @pytest.mark.slow
 async def test_check_max_lifetime(dsn):
     async with pool.AsyncConnectionPool(dsn, min_size=1, max_lifetime=0.2) as p:
@@ -746,7 +856,7 @@ async def test_check_max_lifetime(dsn):
 
 
 @pytest.mark.slow
-async def test_stats_connect(dsn, proxy, monkeypatch):
+async def test_stats_connect(proxy, monkeypatch):
     proxy.start()
     delay_connection(monkeypatch, 0.2)
     async with pool.AsyncConnectionPool(proxy.client_dsn, min_size=3) as p:
@@ -766,6 +876,25 @@ async def test_stats_connect(dsn, proxy, monkeypatch):
         assert stats["connections_lost"] == 3
 
 
+@pytest.mark.crdb_skip("pg_terminate_backend")
+async def test_stats_check(dsn):
+    async with pool.AsyncConnectionPool(
+        dsn, min_size=1, check=pool.AsyncConnectionPool.check_connection
+    ) as p:
+        await p.wait()
+        async with p.connection() as conn:
+            pid = conn.info.backend_pid
+
+        async with await psycopg.AsyncConnection.connect(dsn) as conn:
+            await conn.execute("select pg_terminate_backend(%s)", [pid])
+
+        async with p.connection() as conn:
+            assert conn.info.backend_pid != pid
+
+        stats = p.get_stats()
+        assert stats["connections_lost"] == 1
+
+
 @pytest.mark.slow
 async def test_spike(dsn, monkeypatch):
     # Inspired to https://github.com/brettwooldridge/HikariCP/blob/dev/