PSYCOPG_TEST_DSN: "host=127.0.0.1 user=postgres"
PGPASSWORD: password
PYTEST_ADDOPTS: --color yes
- # Enable to run tests using the minumum version of dependencies.
+ # Enable to run tests using the minimum version of dependencies.
# PIP_CONSTRAINT: ${{ github.workspace }}/tests/constraints.txt
steps:
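
As a hedged aside (not part of the workflow above): roughly how the test suite could consume these environment variables. The fallback DSN mirrors the workflow value, PGPASSWORD is picked up by libpq automatically when exported, and the query is purely illustrative.

import os

import psycopg

# Connect with the DSN provided by CI, falling back to the value set above.
dsn = os.environ.get("PSYCOPG_TEST_DSN", "host=127.0.0.1 user=postgres")
with psycopg.connect(dsn) as conn:
    print(conn.info.server_version)
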
@staticmethod
def is_supported() -> bool:
- """Return `!True` if the psycopg libpq wrapper suports pipeline mode."""
+ """Return `!True` if the psycopg libpq wrapper supports pipeline mode."""
if BasePipeline._is_supported is None:
# Support only depends on the libpq functions available in the pq
# wrapper, not on the database version.
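
A hedged usage sketch of this check, assuming the public psycopg 3 API and a hypothetical "dbname=test" target: guard pipeline usage behind Pipeline.is_supported() so the code degrades to one round trip per statement when the wrapped libpq lacks pipeline mode.

import psycopg

with psycopg.connect("dbname=test") as conn:
    if psycopg.Pipeline.is_supported():
        # Batch both statements in a single round trip.
        with conn.pipeline():
            conn.execute("CREATE TABLE IF NOT EXISTS t (id int)")
            conn.execute("INSERT INTO t VALUES (1)")
    else:
        # Fall back to plain execution.
        conn.execute("CREATE TABLE IF NOT EXISTS t (id int)")
        conn.execute("INSERT INTO t VALUES (1)")
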
) -> None:
"""Process a results set fetched from the current pipeline.
- This matchs 'results' with its respective element in the pipeline
+ This matches 'results' with its respective element in the pipeline
queue. For commands (None value in the pipeline queue), results are
checked directly. For prepare statement creation requests, update the
cache. Otherwise, results are attached to their respective cursor.
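
A minimal sketch of the behaviour the docstring describes, assuming a reachable "dbname=test" database: each execute() inside the pipeline gets its own cursor, and the queued results are matched back to those cursors.

import psycopg

with psycopg.connect("dbname=test") as conn:
    with conn.pipeline():
        cur1 = conn.execute("SELECT 1")
        cur2 = conn.execute("SELECT 2")

    # After the pipeline exits, each cursor holds its own result.
    assert cur1.fetchone() == (1,)
    assert cur2.fetchone() == (2,)
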
self._exit()
except Exception as exc2:
# Notice that this error might be pretty irrecoverable. It
- # happens on COPY, for insance: even if sync succeeds, exiting
+ # happens on COPY, for instance: even if sync succeeds, exiting
# fails with "cannot exit pipeline mode with uncollected results"
if exc_val:
logger.warning("error ignored exiting %r: %s", self, exc2)
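
The handling above follows a common cleanup pattern; here is a generic, hedged sketch of it (the names are illustrative, not psycopg API): if cleanup fails while another exception is already propagating, log the secondary failure instead of masking the original error.

import logging

logger = logging.getLogger(__name__)

def exit_resource(resource, exc_val):
    # resource and exc_val stand in for the pipeline and the exception
    # currently being propagated, if any.
    try:
        resource.close()
    except Exception as exc2:
        if exc_val:
            # Keep the original error; only log the one raised while exiting.
            logger.warning("error ignored exiting %r: %s", resource, exc2)
        else:
            raise
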
# Size of data to accumulate before sending it down the network. We fill a
# buffer this size field by field, and when it passes the threshold size
- # wee ship it, so it may end up being bigger than this.
+ # we ship it, so it may end up being bigger than this.
BUFFER_SIZE = 32 * 1024
# Maximum data size we want to queue to send to the libpq copy. Sending a
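
For illustration only (this class is not psycopg's implementation), a sketch of the accumulation strategy the comment describes: fields are appended whole and the buffer is shipped once it crosses BUFFER_SIZE, so a shipped chunk may exceed the threshold.

BUFFER_SIZE = 32 * 1024

class FieldBuffer:
    """Toy accumulator: flush whole fields once the threshold is crossed."""

    def __init__(self, ship):
        self._ship = ship  # hypothetical callable sending bytes down the network
        self._parts: list[bytes] = []
        self._size = 0

    def add_field(self, data: bytes) -> None:
        self._parts.append(data)
        self._size += len(data)
        if self._size >= BUFFER_SIZE:
            # Checked only after appending a whole field, so the joined
            # chunk can be larger than BUFFER_SIZE.
            self._ship(b"".join(self._parts))
            self._parts.clear()
            self._size = 0
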
first = False
except e.Error as ex:
# try to get out of ACTIVE state. Just do a single attempt, which
- # shoud work to recover from an error or query cancelled.
+ # should work to recover from an error or query cancelled.
if self._pgconn.transaction_status == ACTIVE:
try:
self._conn.wait(self._stream_fetchone_gen(first))
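
A hedged usage sketch of the streaming interface this recovery code belongs to, assuming a reachable "dbname=test" database: Cursor.stream() yields rows one at a time instead of fetching the whole result set.

import psycopg

with psycopg.connect("dbname=test") as conn:
    with conn.cursor() as cur:
        for record in cur.stream("SELECT generate_series(1, 5)"):
            print(record)
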
first = False
except e.Error as ex:
# try to get out of ACTIVE state. Just do a single attempt, which
- # shoud work to recover from an error or query cancelled.
+ # should work to recover from an error or query cancelled.
if self._pgconn.transaction_status == pq.TransactionStatus.ACTIVE:
try:
await self._conn.wait(self._stream_fetchone_gen(first))
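
The asynchronous variant mirrors it; a hedged sketch using the asyncio entry point and the same hypothetical DSN:

import asyncio

import psycopg

async def main() -> None:
    async with await psycopg.AsyncConnection.connect("dbname=test") as conn:
        async with conn.cursor() as cur:
            async for record in cur.stream("SELECT generate_series(1, 5)"):
                print(record)

asyncio.run(main())
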
enc = conn_encoding(self.connection)
label = data.decode(enc, "replace") # type: ignore[union-attr]
raise e.DataError(
- f"bad memeber for enum {self.enum.__qualname__}: {label!r}"
+ f"bad member for enum {self.enum.__qualname__}: {label!r}"
)
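
To put the error message in context, a hedged sketch of the enum adaptation this loader belongs to; the "emotion" type, its labels, and the "dbname=test" DSN are assumptions for illustration. A database label with no matching Python member is what would trigger the DataError above.

from enum import Enum

import psycopg
from psycopg.types.enum import EnumInfo, register_enum

class Emotion(Enum):
    happy = "happy"
    sad = "sad"

with psycopg.connect("dbname=test") as conn:
    conn.execute("CREATE TYPE emotion AS ENUM ('happy', 'sad')")
    info = EnumInfo.fetch(conn, "emotion")
    # Map each Python member to its database label explicitly.
    register_enum(info, conn, Emotion, mapping={m: m.value for m in Emotion})
    row = conn.execute("SELECT 'happy'::emotion").fetchone()
    assert row[0] is Emotion.happy
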
m = re.match(r"(.....)\s+(?:E|W|S)\s+ERRCODE_(\S+)(?:\s+(\S+))?$", line)
if m:
sqlstate, macro, spec = m.groups()
- # skip sqlstates without specs as they are not publically visible
+ # skip sqlstates without specs as they are not publicly visible
if not spec:
continue
errlabel = spec.upper()
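
As a hedged illustration of what this parser extracts, the regex applied to one line in the format of PostgreSQL's errcodes.txt (spacing approximated):

import re

line = "22012    E    ERRCODE_DIVISION_BY_ZERO    division_by_zero"
m = re.match(r"(.....)\s+(?:E|W|S)\s+ERRCODE_(\S+)(?:\s+(\S+))?$", line)
assert m is not None
sqlstate, macro, spec = m.groups()
print(sqlstate, macro, spec)  # 22012 DIVISION_BY_ZERO division_by_zero
errlabel = spec.upper()       # "DIVISION_BY_ZERO"
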