.. autoattribute:: name
.. autoattribute:: scrollable
- .. seealso:: The PostgreSQL DECLARE_ statement documetation
+ .. seealso:: The PostgreSQL DECLARE_ statement documentation
for the description of :sql:`[NO] SCROLL`.
.. autoattribute:: withhold
- .. seealso:: The PostgreSQL DECLARE_ statement documetation
+ .. seealso:: The PostgreSQL DECLARE_ statement documentation
for the description of :sql:`{WITH|WITHOUT} HOLD`.
.. _DECLARE: https://www.postgresql.org/docs/current/sql-declare.html
.. module:: psycopg._dns
-This module contains a few experimental utilities to interact wit the DNS
+This module contains a few experimental utilities to interact with the DNS
server before performing a connection.
.. warning::
``pool-2``, etc.
:type name: `!str`
- :param timeout: The default maximum time in seconts that a client can wait
+ :param timeout: The default maximum time in seconds that a client can wait
to receive a connection from the pool (using `connection()`
                        or `getconn()`). Note that these methods allow overriding
the *timeout* default.
Added *open* parameter to init method.
- .. note:: In a future version, the deafult value for the *open* parameter
+ .. note:: In a future version, the default value for the *open* parameter
might be changed to `!False`. If you rely on this behaviour (e.g. if
you don't use the pool as a context manager) you might want to specify
this parameter explicitly.
if logger.level == logging.NOTSET:
logger.setLevel(logging.WARNING)
-# DBAPI compliancy
+# DBAPI compliance
connect = Connection.connect
apilevel = "2.0"
threadsafety = 2
@property
def internal_size(self) -> Optional[int]:
- """The interal field size for fixed-size types, None otherwise."""
+ """The internal field size for fixed-size types, None otherwise."""
fsize = self._data.fsize
return fsize if fsize >= 0 else None
ans = await async_resolver.resolve(host)
except DNSException as ex:
# Special case localhost: on MacOS it doesn't get resolved.
- # I assue it is just resolved by /etc/hosts, which is not handled
+ # I assume it is just resolved by /etc/hosts, which is not handled
# by dnspython.
if host == "localhost":
hosts_out.append(host)
) -> None:
"""Validate cached entry with 'key' by checking query 'results'.
- Possibly return a command to perform maintainance on database side.
+ Possibly return a command to perform maintenance on database side.
Note: this method is only called in pipeline mode.
"""
size of int?...)
In these cases, a dumper can implement `!get_key()` and return a new
- class, or sequence of classes, that can be used to indentify the same
+ class, or sequence of classes, that can be used to identify the same
dumper again. If the mechanism is not needed, the method should return
the same *cls* object passed in the constructor.
# to the connections subclass, which might wait either in blocking mode
# or through asyncio.
#
- # All these generators assume exclusive acces to the connection: subclasses
+ # All these generators assume exclusive access to the connection: subclasses
# should have a lock and hold it before calling and consuming them.
@classmethod
self._iresult = i
res = self.pgresult = self._results[i]
- # Note: the only reason to override format is to correclty set
+ # Note: the only reason to override format is to correctly set
# binary loaders on server-side cursors, because send_describe_portal
# only returns a text result.
self._tx.set_pgresult(res, format=format)
PGcancel = module.PGcancel
__build_version__ = getattr(module, "__build_version__", None)
elif impl:
- raise ImportError(f"requested psycopg impementation '{impl}' unknown")
+ raise ImportError(f"requested psycopg implementation '{impl}' unknown")
else:
sattempts = "\n".join(f"- {attempt}" for attempt in attempts)
raise ImportError(
r"""Row factory to represent rows as simple tuples.
This is the default factory, used when `~psycopg.Connection.connect()` or
- `~psycopg.Connection.cursor()` are called withouth a `!row_factory`
+ `~psycopg.Connection.cursor()` are called without a `!row_factory`
parameter.
"""
Use this function only if you absolutely want to convert a Python string to
an SQL quoted literal to use e.g. to generate batch SQL and you won't have
- a connection avaliable when you will need to use it.
+ a connection available when you will need to use it.
This function is relatively inefficient, because it doesn't cache the
adaptation rules. If you pass a *context* you can adapt the adaptation
(?: # lower bound:
" ( (?: [^"] | "")* ) " # - a quoted string
| ( [^",]+ ) # - or an unquoted string
- )? # - or empty (not catched)
+ )? # - or empty (not caught)
,
(?: # upper bound:
" ( (?: [^"] | "")* ) " # - a quoted string
| ( [^"\)\]]+ ) # - or an unquoted string
- )? # - or empty (not catched)
+ )? # - or empty (not caught)
( \)|\] ) # upper bound flag
""",
re.VERBOSE,
"""
Dumper for strings in text format to the text oid.
- Note that this dumper is not used by deafult because the type is too strict
+ Note that this dumper is not used by default because the type is too strict
    and PostgreSQL would require an explicit cast to everything that is not a
text field. However it is useful where the unknown oid is ambiguous and the
text oid is required, for instance with variadic functions.
The function interface allows C code to use this method automatically
to create larger buffers, e.g. for copy, composite objects, etc.
- Implementation note: as you will alway need to make sure that rv
+ Implementation note: as you will always need to make sure that rv
has enough space to include what you want to dump, `ensure_size()`
    might come in handy.
"""
) except *:
cdef int i
- # the PostgresQuery convers the param_types to tuple, so this operation
+ # the PostgresQuery converts the param_types to tuple, so this operation
# is most often no-op
cdef tuple tparam_types
if param_types is not None and not isinstance(param_types, tuple):
format = PQ_TEXT
oid = oids.BYTEA_OID
- # 0: not set, 1: just single "'" quote, 3: " E'" qoute
+ # 0: not set, 1: just single "'" quote, 3: " E'" quote
cdef int _qplen
def __cinit__(self):
elif self.max_waiting and len(self._waiting) >= self.max_waiting:
self._stats[self._REQUESTS_ERRORS] += 1
raise TooManyRequests(
- f"the pool {self.name!r} has aleady"
+ f"the pool {self.name!r} has already"
f" {len(self._waiting)} requests waiting"
)
return conn
if self._pool_full_event:
self._pool_full_event.set()
else:
- # The connection created by wait shoudn't decrease the
+ # The connection created by wait shouldn't decrease the
                    # count of the number of connections used.
self._nconns -= 1
elif self.max_waiting and len(self._waiting) >= self.max_waiting:
self._stats[self._REQUESTS_ERRORS] += 1
raise TooManyRequests(
- f"the pool {self.name!r} has aleady"
+ f"the pool {self.name!r} has already"
f" {len(self._waiting)} requests waiting"
)
return conn
if self._pool_full_event:
self._pool_full_event.set()
else:
- # The connection created by wait shoudn't decrease the
+ # The connection created by wait shouldn't decrease the
                    # count of the number of connections used.
self._nconns -= 1
def getconn(self, timeout: Optional[float] = None) -> Connection[Any]:
"""Obtain a connection from the pool.
- You should preferrably use `connection()`. Use this function only if
+ You should preferably use `connection()`. Use this function only if
it is not possible to use the connection as context manager.
After using this function you *must* call a corresponding `putconn()`:
elif self.max_waiting and len(self._waiting) >= self.max_waiting:
self._stats[self._REQUESTS_ERRORS] += 1
raise TooManyRequests(
- f"the pool {self.name!r} has aleady"
+ f"the pool {self.name!r} has already"
f" {len(self._waiting)} requests waiting"
)
return conn
# Don't make all the workers time out at the same moment
timeout = cls._jitter(cls._WORKER_TIMEOUT, -0.1, 0.1)
while True:
- # Use a timeout to make the wait interruptable
+ # Use a timeout to make the wait interruptible
try:
task = q.get(timeout=timeout)
except Empty:
elif self.max_waiting and len(self._waiting) >= self.max_waiting:
self._stats[self._REQUESTS_ERRORS] += 1
raise TooManyRequests(
- f"the pool {self.name!r} has aleady"
+ f"the pool {self.name!r} has already"
f" {len(self._waiting)} requests waiting"
)
return conn
@contextmanager
def find_insert_problem(self, conn):
- """Context manager to help finding a problematic vaule."""
+ """Context manager to help finding a problematic value."""
try:
yield
except psycopg.DatabaseError:
...and exiting the context successfully will "commit" the same.
"""
# Case 1
- # Using Transaction explicitly becase conn.transaction() enters the contetx
+ # Using Transaction explicitly because conn.transaction() enters the context
assert not commands
with conn.transaction() as tx:
assert commands.popall() == ["BEGIN"]
commands = acommands
# Case 1
- # Using Transaction explicitly becase conn.transaction() enters the contetx
+ # Using Transaction explicitly because conn.transaction() enters the context
async with aconn.transaction() as tx:
assert commands.popall() == ["BEGIN"]
assert not tx.savepoint_name