import psycopg
from psycopg.pq import TransactionStatus
-from .test_pool import delay_connection
+from .test_pool import delay_connection, ensure_waiting
try:
from psycopg_pool import NullConnectionPool
(pid2,) = cur.fetchone() # type: ignore[misc]
with p.connection() as conn:
- assert conn.pgconn.backend_pid not in (pid1, pid2)
+ assert conn.info.backend_pid not in (pid1, pid2)
def test_context(dsn):
assert resets == 1
with conn.execute("show timezone") as cur:
assert cur.fetchone() == ("UTC",)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with NullConnectionPool(dsn, max_size=1, reset=reset) as p:
with p.connection() as conn:
# instead of making a new one.
t = Thread(target=worker)
t.start()
+ ensure_waiting(p)
assert resets == 0
conn.execute("set timezone to '+2:00'")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
t.join()
p.wait()
def worker():
with p.connection() as conn:
conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with NullConnectionPool(dsn, max_size=1, reset=reset) as p:
with p.connection() as conn:
t = Thread(target=worker)
t.start()
+ ensure_waiting(p)
conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
t.join()
def worker():
with p.connection() as conn:
conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with NullConnectionPool(dsn, max_size=1, reset=reset) as p:
with p.connection() as conn:
t = Thread(target=worker)
t.start()
+ ensure_waiting(p)
conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
t.join()
def worker():
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
assert not conn.execute(
"select 1 from pg_class where relname = 'test_intrans_rollback'"
).fetchone()
# of making a new one.
t = Thread(target=worker)
t.start()
+ ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
conn.execute("create table test_intrans_rollback ()")
- assert conn.pgconn.transaction_status == TransactionStatus.INTRANS
+ assert conn.info.transaction_status == TransactionStatus.INTRANS
p.putconn(conn)
t.join()
def worker():
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
with NullConnectionPool(dsn, max_size=1) as p:
conn = p.getconn()
# of making a new one.
t = Thread(target=worker)
t.start()
+ ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with pytest.raises(psycopg.ProgrammingError):
conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
p.putconn(conn)
t.join()
def worker():
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
with NullConnectionPool(dsn, max_size=1) as p:
conn = p.getconn()
t = Thread(target=worker)
t.start()
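+ # Make sure the worker is in the queue before returning the connection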
+ ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
- cur = conn.cursor()
- with cur.copy("copy (select * from generate_series(1, 10)) to stdout"):
- pass
- assert conn.pgconn.transaction_status == TransactionStatus.ACTIVE
+ pids.append(conn.info.backend_pid)
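+ # Start a copy without consuming its data, so the connection is left ACTIVE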
+ conn.pgconn.exec_(
+ b"copy (select * from generate_series(1, 10)) to stdout"
+ )
+ assert conn.info.transaction_status == TransactionStatus.ACTIVE
p.putconn(conn)
t.join()
def worker(p):
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
with NullConnectionPool(dsn, max_size=1) as p:
conn = p.getconn()
t = Thread(target=worker, args=(p,))
t.start()
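+ # Make sure the worker is in the queue before returning the connection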
+ ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with pytest.raises(psycopg.ProgrammingError):
conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
p.putconn(conn)
t.join()
t2 = Thread(target=w2)
t2.start()
# Wait until w2 is in the queue
- while not p._waiting:
- sleep(0)
+ ensure_waiting(p)
p.close(0)
def worker(p):
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
sleep(0.1)
ts = []
import psycopg
from psycopg.pq import TransactionStatus
from psycopg._compat import create_task
-from .test_pool_async import delay_connection
+from .test_pool_async import delay_connection, ensure_waiting
pytestmark = [
pytest.mark.asyncio,
(pid2,) = await cur.fetchone() # type: ignore[misc]
async with p.connection() as conn:
- assert conn.pgconn.backend_pid not in (pid1, pid2)
+ assert conn.info.backend_pid not in (pid1, pid2)
async def test_context(dsn):
assert resets == 1
cur = await conn.execute("show timezone")
assert (await cur.fetchone()) == ("UTC",)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
async with AsyncNullConnectionPool(dsn, max_size=1, reset=reset) as p:
async with p.connection() as conn:
# Queue the worker so it will take the same connection a second time
# instead of making a new one.
t = create_task(worker())
+ await ensure_waiting(p)
assert resets == 0
await conn.execute("set timezone to '+2:00'")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await asyncio.gather(t)
await p.wait()
async def worker():
async with p.connection() as conn:
await conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
async with AsyncNullConnectionPool(dsn, max_size=1, reset=reset) as p:
async with p.connection() as conn:
t = create_task(worker())
+ await ensure_waiting(p)
await conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await asyncio.gather(t)
async def worker():
async with p.connection() as conn:
await conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
async with AsyncNullConnectionPool(dsn, max_size=1, reset=reset) as p:
async with p.connection() as conn:
t = create_task(worker())
+ await ensure_waiting(p)
await conn.execute("select 1")
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await asyncio.gather(t)
async def worker():
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
cur = await conn.execute(
"select 1 from pg_class where relname = 'test_intrans_rollback'"
)
# Queue the worker so it will take the connection a second time instead
# of making a new one.
t = create_task(worker())
+ await ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await conn.execute("create table test_intrans_rollback ()")
- assert conn.pgconn.transaction_status == TransactionStatus.INTRANS
+ assert conn.info.transaction_status == TransactionStatus.INTRANS
await p.putconn(conn)
await asyncio.gather(t)
async def worker():
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
async with AsyncNullConnectionPool(dsn, max_size=1) as p:
conn = await p.getconn()
t = create_task(worker())
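+ # Make sure the worker is in the queue before returning the connection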
+ await ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with pytest.raises(psycopg.ProgrammingError):
await conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
await p.putconn(conn)
await asyncio.gather(t)
async def worker():
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
async with AsyncNullConnectionPool(dsn, max_size=1) as p:
conn = await p.getconn()
t = create_task(worker())
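+ # Make sure the worker is in the queue before returning the connection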
+ await ensure_waiting(p)
- pids.append(conn.pgconn.backend_pid)
- cur = conn.cursor()
- async with cur.copy(
- "copy (select * from generate_series(1, 10)) to stdout"
- ):
- pass
- assert conn.pgconn.transaction_status == TransactionStatus.ACTIVE
+ pids.append(conn.info.backend_pid)
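+ # Start a copy without consuming its data, so the connection is left ACTIVE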
+ conn.pgconn.exec_(
+ b"copy (select * from generate_series(1, 10)) to stdout"
+ )
+ assert conn.info.transaction_status == TransactionStatus.ACTIVE
await p.putconn(conn)
await asyncio.gather(t)
async def worker():
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
- assert conn.pgconn.transaction_status == TransactionStatus.IDLE
+ pids.append(conn.info.backend_pid)
+ assert conn.info.transaction_status == TransactionStatus.IDLE
async with AsyncNullConnectionPool(dsn, max_size=1) as p:
conn = await p.getconn()
t = create_task(worker())
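+ # Make sure the worker is in the queue before returning the connection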
+ await ensure_waiting(p)
async def bad_rollback():
conn.pgconn.finish()
orig_rollback = conn.rollback
monkeypatch.setattr(conn, "rollback", bad_rollback)
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
with pytest.raises(psycopg.ProgrammingError):
await conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
await p.putconn(conn)
await asyncio.gather(t)
t2 = create_task(w2())
# Wait until w2 is in the queue
- while not p._waiting:
- await asyncio.sleep(0)
-
+ await ensure_waiting(p)
await p.close()
# Wait for the workers to finish
async def worker():
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await asyncio.sleep(0.1)
async with AsyncNullConnectionPool(dsn, max_size=1, max_lifetime=0.2) as p:
(pid2,) = cur.fetchone() # type: ignore[misc]
with p.connection() as conn:
- assert conn.pgconn.backend_pid in (pid1, pid2)
+ assert conn.info.backend_pid in (pid1, pid2)
def test_context(dsn):
with pool.ConnectionPool(dsn, min_size=1) as p:
with pytest.raises(ZeroDivisionError):
with p.connection() as conn:
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
1 / 0
with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
+ assert conn2.info.backend_pid == pid
@pytest.mark.slow
with pool.ConnectionPool(dsn, min_size=1, reset=reset) as p:
with p.connection() as conn:
conn.execute("select 1")
- pid1 = conn.pgconn.backend_pid
+ pid1 = conn.info.backend_pid
with p.connection() as conn:
conn.execute("select 1")
- pid2 = conn.pgconn.backend_pid
+ pid2 = conn.info.backend_pid
assert pid1 != pid2
assert caplog.records
with pool.ConnectionPool(dsn, min_size=1, reset=reset) as p:
with p.connection() as conn:
conn.execute("select 1")
- pid1 = conn.pgconn.backend_pid
+ pid1 = conn.info.backend_pid
with p.connection() as conn:
conn.execute("select 1")
- pid2 = conn.pgconn.backend_pid
+ pid2 = conn.info.backend_pid
assert pid1 != pid2
assert caplog.records
with pool.ConnectionPool(dsn, min_size=1) as p:
conn = p.getconn()
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
conn.execute("create table test_intrans_rollback ()")
- assert conn.pgconn.transaction_status == TransactionStatus.INTRANS
+ assert conn.info.transaction_status == TransactionStatus.INTRANS
p.putconn(conn)
with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid == pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert not conn2.execute(
"select 1 from pg_class where relname = 'test_intrans_rollback'"
).fetchone()
with pool.ConnectionPool(dsn, min_size=1) as p:
conn = p.getconn()
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
with pytest.raises(psycopg.ProgrammingError):
conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
p.putconn(conn)
with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid == pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 1
assert "INERROR" in caplog.records[0].message
with pool.ConnectionPool(dsn, min_size=1) as p:
conn = p.getconn()
- pid = conn.pgconn.backend_pid
- cur = conn.cursor()
- with cur.copy("copy (select * from generate_series(1, 10)) to stdout"):
- pass
- assert conn.pgconn.transaction_status == TransactionStatus.ACTIVE
+ pid = conn.info.backend_pid
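+ # Start a copy without consuming its data, so the connection is left ACTIVE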
+ conn.pgconn.exec_(
+ b"copy (select * from generate_series(1, 10)) to stdout"
+ )
+ assert conn.info.transaction_status == TransactionStatus.ACTIVE
p.putconn(conn)
with p.connection() as conn2:
- assert conn2.pgconn.backend_pid != pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid != pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 2
assert "ACTIVE" in caplog.records[0].message
orig_rollback = conn.rollback
monkeypatch.setattr(conn, "rollback", bad_rollback)
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
with pytest.raises(psycopg.ProgrammingError):
conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
p.putconn(conn)
with p.connection() as conn2:
- assert conn2.pgconn.backend_pid != pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid != pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 3
assert "INERROR" in caplog.records[0].message
t2 = Thread(target=w2)
t2.start()
# Wait until w2 is in the queue
- while not p._waiting:
- sleep(0)
+ ensure_waiting(p)
p.close(0)
pids = []
for i in range(5):
with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
sleep(0.2)
assert pids[0] == pids[1] != pids[4], pids
with pool.ConnectionPool(dsn, min_size=4) as p:
p.wait(1.0)
with p.connection() as conn:
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
p.wait(1.0)
- pids = set(conn.pgconn.backend_pid for conn in p._pool)
+ pids = set(conn.info.backend_pid for conn in p._pool)
assert pid in pids
conn.close()
p.check()
assert len(caplog.records) == 1
p.wait(1.0)
- pids2 = set(conn.pgconn.backend_pid for conn in p._pool)
+ pids2 = set(conn.info.backend_pid for conn in p._pool)
assert len(pids & pids2) == 3
assert pid not in pids2
connect_orig = psycopg.Connection.connect
monkeypatch.setattr(psycopg.Connection, "connect", connect_delay)
+
+
+def ensure_waiting(p, num=1):
+ """
+ Wait until there are at least *num* clients waiting in the queue.
+ """
+ while len(p._waiting) < num:
+ sleep(0)
(pid2,) = await cur.fetchone() # type: ignore[misc]
async with p.connection() as conn:
- assert conn.pgconn.backend_pid in (pid1, pid2)
+ assert conn.info.backend_pid in (pid1, pid2)
async def test_context(dsn):
async with pool.AsyncConnectionPool(dsn, min_size=1) as p:
with pytest.raises(ZeroDivisionError):
async with p.connection() as conn:
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
1 / 0
async with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
+ assert conn2.info.backend_pid == pid
@pytest.mark.slow
async with pool.AsyncConnectionPool(dsn, min_size=1, reset=reset) as p:
async with p.connection() as conn:
await conn.execute("select 1")
- pid1 = conn.pgconn.backend_pid
+ pid1 = conn.info.backend_pid
async with p.connection() as conn:
await conn.execute("select 1")
- pid2 = conn.pgconn.backend_pid
+ pid2 = conn.info.backend_pid
assert pid1 != pid2
assert caplog.records
async with pool.AsyncConnectionPool(dsn, min_size=1, reset=reset) as p:
async with p.connection() as conn:
await conn.execute("select 1")
- pid1 = conn.pgconn.backend_pid
+ pid1 = conn.info.backend_pid
async with p.connection() as conn:
await conn.execute("select 1")
- pid2 = conn.pgconn.backend_pid
+ pid2 = conn.info.backend_pid
assert pid1 != pid2
assert caplog.records
async with pool.AsyncConnectionPool(dsn, min_size=1) as p:
conn = await p.getconn()
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
await conn.execute("create table test_intrans_rollback ()")
- assert conn.pgconn.transaction_status == TransactionStatus.INTRANS
+ assert conn.info.transaction_status == TransactionStatus.INTRANS
await p.putconn(conn)
async with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid == pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
cur = await conn2.execute(
"select 1 from pg_class where relname = 'test_intrans_rollback'"
)
async with pool.AsyncConnectionPool(dsn, min_size=1) as p:
conn = await p.getconn()
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
with pytest.raises(psycopg.ProgrammingError):
await conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
await p.putconn(conn)
async with p.connection() as conn2:
- assert conn2.pgconn.backend_pid == pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid == pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 1
assert "INERROR" in caplog.records[0].message
async with pool.AsyncConnectionPool(dsn, min_size=1) as p:
conn = await p.getconn()
- pid = conn.pgconn.backend_pid
- cur = conn.cursor()
- async with cur.copy(
- "copy (select * from generate_series(1, 10)) to stdout"
- ):
- pass
- assert conn.pgconn.transaction_status == TransactionStatus.ACTIVE
+ pid = conn.info.backend_pid
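+ # Start a copy without consuming its data, so the connection is left ACTIVE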
+ conn.pgconn.exec_(
+ b"copy (select * from generate_series(1, 10)) to stdout"
+ )
+ assert conn.info.transaction_status == TransactionStatus.ACTIVE
await p.putconn(conn)
async with p.connection() as conn2:
- assert conn2.pgconn.backend_pid != pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid != pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 2
assert "ACTIVE" in caplog.records[0].message
orig_rollback = conn.rollback
monkeypatch.setattr(conn, "rollback", bad_rollback)
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
with pytest.raises(psycopg.ProgrammingError):
await conn.execute("wat")
- assert conn.pgconn.transaction_status == TransactionStatus.INERROR
+ assert conn.info.transaction_status == TransactionStatus.INERROR
await p.putconn(conn)
async with p.connection() as conn2:
- assert conn2.pgconn.backend_pid != pid
- assert conn2.pgconn.transaction_status == TransactionStatus.IDLE
+ assert conn2.info.backend_pid != pid
+ assert conn2.info.transaction_status == TransactionStatus.IDLE
assert len(caplog.records) == 3
assert "INERROR" in caplog.records[0].message
t2 = create_task(w2())
# Wait until w2 is in the queue
- while not p._waiting:
- await asyncio.sleep(0)
-
+ await ensure_waiting(p)
await p.close()
# Wait for the workers to finish
pids = []
for i in range(5):
async with p.connection() as conn:
- pids.append(conn.pgconn.backend_pid)
+ pids.append(conn.info.backend_pid)
await asyncio.sleep(0.2)
assert pids[0] == pids[1] != pids[4], pids
async with pool.AsyncConnectionPool(dsn, min_size=4) as p:
await p.wait(1.0)
async with p.connection() as conn:
- pid = conn.pgconn.backend_pid
+ pid = conn.info.backend_pid
await p.wait(1.0)
- pids = set(conn.pgconn.backend_pid for conn in p._pool)
+ pids = set(conn.info.backend_pid for conn in p._pool)
assert pid in pids
await conn.close()
await p.check()
assert len(caplog.records) == 1
await p.wait(1.0)
- pids2 = set(conn.pgconn.backend_pid for conn in p._pool)
+ pids2 = set(conn.info.backend_pid for conn in p._pool)
assert len(pids & pids2) == 3
assert pid not in pids2
connect_orig = psycopg.AsyncConnection.connect
monkeypatch.setattr(psycopg.AsyncConnection, "connect", connect_delay)
+
+
+async def ensure_waiting(p, num=1):
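+ """
+ Wait until there are at least *num* clients waiting in the queue.
+ """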
+ while len(p._waiting) < num:
+ await asyncio.sleep(0)