There is a good optimisation to be had by moving the two loops into C.
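For scale: with the current code `fetchall()` makes one Python-level call per
row (`load_row()`), while the new `load_rows()` runs both the row loop and the
field loop inside the extension module. A rough way to gauge the win, as a
sketch only (it assumes a local "test" database; the query and repeat count
are arbitrary):

    import timeit
    import psycopg3

    conn = psycopg3.connect("dbname=test")
    cur = conn.cursor()

    def fetch():
        # 100k single-field rows: values are tiny, so the per-row
        # Python call overhead dominates the timing
        cur.execute("select generate_series(1, 100000)")
        return cur.fetchall()

    print(timeit.timeit(fetch, number=10))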
f" to format {Format(format).name}"
)
+ def load_rows(self, row0: int, row1: int) -> Sequence[Tuple[Any, ...]]:
+ res = self._pgresult
+ if not res:
+ raise e.InterfaceError("result not set")
+
+ if not (0 <= row0 <= self._ntuples and 0 <= row1 <= self._ntuples):
+ raise e.InterfaceError(
+ f"rows must be included between 0 and {self._ntuples}"
+ )
+
+ records: List[Tuple[Any, ...]]
+ records = [None] * (row1 - row0) # type: ignore[list-item]
+ for row in range(row0, row1):
+ record: List[Any] = [None] * self._nfields
+ for col in range(self._nfields):
+ val = res.get_value(row, col)
+ if val is not None:
+ record[col] = self._row_loaders[col](val)
+ records[row - row0] = tuple(record)
+
+ return records
+
def load_row(self, row: int) -> Optional[Tuple[Any, ...]]:
res = self._pgresult
if not res:
    return None

if not 0 <= row < self._ntuples:
    return None
- rv: List[Any] = [None] * self._nfields
+ record: List[Any] = [None] * self._nfields
for col in range(self._nfields):
val = res.get_value(row, col)
if val is not None:
- rv[col] = self._row_loaders[col](val)
+ record[col] = self._row_loaders[col](val)
- return tuple(rv)
+ return tuple(record)
def load_sequence(
self, record: Sequence[Optional[bytes]]
Return `!None` if the recordset is finished.
"""
self._check_result()
- rv = self._transformer.load_row(self._pos)
- if rv is not None:
+ record = self._transformer.load_row(self._pos)
+ if record is not None:
self._pos += 1
- return rv
+ return record
def fetchmany(self, size: int = 0) -> List[Sequence[Any]]:
"""
*size* defaults to `!self.arraysize` if not specified.
"""
self._check_result()
- if not size:
- size = self.arraysize
-
assert self.pgresult
- load = self._transformer.load_row
- rv: List[Any] = [None] * (min(size, self.pgresult.ntuples - self._pos))
-
- for i in range(len(rv)):
- rv[i] = load(i + self._pos)
- self._pos += len(rv)
- return rv
+ if not size:
+ size = self.arraysize
+ records = self._transformer.load_rows(
+ self._pos, min(self._pos + size, self.pgresult.ntuples)
+ )
+ self._pos += len(records)
+ return records # type: ignore[return-value]
def fetchall(self) -> List[Sequence[Any]]:
"""
Return all the remaining records from the current recordset.
"""
self._check_result()
-
assert self.pgresult
- load = self._transformer.load_row
-
- rv: List[Any] = [None] * (self.pgresult.ntuples - self._pos)
- for i in range(len(rv)):
- rv[i] = load(i + self._pos)
-
- self._pos += len(rv)
- return rv
+ records = self._transformer.load_rows(self._pos, self.pgresult.ntuples)
+ self._pos = self.pgresult.ntuples
+ return records # type: ignore[return-value]
def __iter__(self) -> Iterator[Sequence[Any]]:
self._check_result()
async def fetchmany(self, size: int = 0) -> List[Sequence[Any]]:
self._check_result()
- if not size:
- size = self.arraysize
-
assert self.pgresult
- load = self._transformer.load_row
- rv: List[Any] = [None] * (min(size, self.pgresult.ntuples - self._pos))
-
- for i in range(len(rv)):
- rv[i] = load(i + self._pos)
- self._pos += len(rv)
- return rv
+ if not size:
+ size = self.arraysize
+ records = self._transformer.load_rows(
+ self._pos, min(self._pos + size, self.pgresult.ntuples)
+ )
+ self._pos += len(records)
+ return records # type: ignore[return-value]
async def fetchall(self) -> List[Sequence[Any]]:
self._check_result()
-
assert self.pgresult
- load = self._transformer.load_row
-
- rv: List[Any] = [None] * (self.pgresult.ntuples - self._pos)
- for i in range(len(rv)):
- rv[i] = load(i + self._pos)
-
- self._pos += len(rv)
- return rv
+ records = self._transformer.load_rows(self._pos, self.pgresult.ntuples)
+ self._pos = self.pgresult.ntuples
+ return records # type: ignore[return-value]
async def __aiter__(self) -> AsyncIterator[Sequence[Any]]:
self._check_result()
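From the caller's point of view nothing changes: `fetchmany()` and
`fetchall()` still consume the recordset from the current position. A quick
sanity check, as a sketch under the same assumptions as above:

    import psycopg3

    conn = psycopg3.connect("dbname=test")
    cur = conn.cursor()
    cur.execute("select generate_series(1, 5)")
    assert cur.fetchmany(2) == [(1,), (2,)]
    # fetchall() must resume from the cursor position, not from row 0,
    # hence _pos is set to ntuples rather than incremented by it
    assert cur.fetchall() == [(3,), (4,), (5,)]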
def get_dumper(self, obj: Any, format: Format) -> "Dumper":
...
+ def load_rows(self, row0: int, row1: int) -> Sequence[Tuple[Any, ...]]:
+ ...
+
def load_row(self, row: int) -> Optional[Tuple[Any, ...]]:
...
self, types: Sequence[int], formats: Sequence[Format]
) -> None: ...
def get_dumper(self, obj: Any, format: Format) -> Dumper: ...
+ def load_rows(self, row0: int, row1: int) -> Sequence[Tuple[Any, ...]]: ...
def load_row(self, row: int) -> Optional[Tuple[Any, ...]]: ...
def load_sequence(
self, record: Sequence[Optional[bytes]]
f" to format {Format(format).name}"
)
+ def load_rows(self, row0: int, row1: int) -> Sequence[Tuple[Any, ...]]:
+ if self._pgresult is None:
+ raise e.InterfaceError("result not set")
+
+ cdef int crow0 = row0
+ cdef int crow1 = row1
+ if not (0 <= row0 <= self._ntuples and 0 <= row1 <= self._ntuples):
+ raise e.InterfaceError(
+ f"rows must be included between 0 and {self._ntuples}"
+ )
+
+ cdef libpq.PGresult *res = self._pgresult.pgresult_ptr
+ # cheeky access to the internal PGresult structure
+ cdef pg_result_int *ires = <pg_result_int*>res
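+ # (reading attval.len and attval.value straight from the struct saves a
+ # PQgetlength/PQgetvalue call per field)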
+
+ cdef RowLoader loader
+ cdef int row
+ cdef int col
+ cdef int length
+ cdef PGresAttValue *attval
+ cdef const char *val
+ cdef tuple record
+ cdef list records = [None] * (row1 - row0)
+
+ for row in range(crow0, crow1):
+ record = PyTuple_New(self._nfields)
+ for col in range(self._nfields):
+ attval = &(ires.tuples[row][col])
+ length = attval.len
+ if length == -1: # NULL_LEN
+ Py_INCREF(None)
+ PyTuple_SET_ITEM(record, col, None)
+ continue
+
+ # TODO: there is some visible python churn around this lookup.
+ # replace with a C array of borrowed references pointing to
+ # the cloader.cload function pointer
+ loader = self._row_loaders[col]
+ val = attval.value
+ if loader.cloader is not None:
+ pyval = loader.cloader.cload(val, length)
+ else:
+ # TODO: no copy
+ pyval = loader.pyloader(val[:length])
+
+ Py_INCREF(pyval)
+ PyTuple_SET_ITEM(record, col, pyval)
+
+ records[row - crow0] = record
+
+ return records
+
def load_row(self, row: int) -> Optional[Tuple[Any, ...]]:
if self._pgresult is None:
    return None

cdef int crow = row
if not 0 <= crow < self._ntuples:
    return None
cdef libpq.PGresult *res = self._pgresult.pgresult_ptr
+ # cheeky access to the internal PGresult structure
+ cdef pg_result_int *ires = <pg_result_int*>res
cdef RowLoader loader
cdef int col
cdef int length
- cdef const char *val
-
- # cheeky access to the internal PGresult structure
- cdef pg_result_int *ires = <pg_result_int*>res
cdef PGresAttValue *attval
+ cdef const char *val
+ cdef tuple record
- rv = PyTuple_New(self._nfields)
+ record = PyTuple_New(self._nfields)
for col in range(self._nfields):
attval = &(ires.tuples[crow][col])
length = attval.len
if length == -1: # NULL_LEN
Py_INCREF(None)
- PyTuple_SET_ITEM(rv, col, None)
+ PyTuple_SET_ITEM(record, col, None)
continue
# TODO: there is some visible python churn around this lookup.
# replace with a C array of borrowed references pointing to
# the cloader.cload function pointer
loader = self._row_loaders[col]
val = attval.value
if loader.cloader is not None:
    pyval = loader.cloader.cload(val, length)
else:
    # TODO: no copy
    pyval = loader.pyloader(val[:length])
Py_INCREF(pyval)
- PyTuple_SET_ITEM(rv, col, pyval)
+ PyTuple_SET_ITEM(record, col, pyval)
- return rv
+ return record
def load_sequence(self, record: Sequence[Optional[bytes]]) -> Tuple[Any, ...]:
cdef int length = len(record)