use cython for result
author     Federico Caselli <cfederico87@gmail.com>
           Thu, 13 Nov 2025 21:22:03 +0000 (22:22 +0100)
committer  Federico Caselli <cfederico87@gmail.com>
           Mon, 8 Dec 2025 20:45:41 +0000 (21:45 +0100)
Use cython for the internal result methods. This improves the speed
of row processing by about 15-30%, depending on the case.
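
For context, the new _result_cy.py follows the same dual-mode pattern
as the other *_cy modules: it imports the real cython package when it
is installed, falls back to the pure-Python shim in
sqlalchemy.util.cython otherwise, and then branches on cython.compiled.
A minimal sketch of that pattern (the function bodies below are
illustrative placeholders, not the actual implementation):

    try:
        import cython
    except ModuleNotFoundError:
        from sqlalchemy.util import cython

    if cython.compiled:
        # compiled path: CPython C-API fast paths live here
        def many_rows(rows):
            ...
    else:
        # interpreted path: plain Python equivalents
        def many_rows(rows):
            ...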

Change-Id: Ia19c1a61aa1a55405b8327f88b1ee02627c4217a

13 files changed:
lib/sqlalchemy/engine/_result_cy.py [new file with mode: 0644]
lib/sqlalchemy/engine/cursor.py
lib/sqlalchemy/engine/result.py
lib/sqlalchemy/orm/loading.py
lib/sqlalchemy/util/_has_cython.py
lib/sqlalchemy/util/cython.py
setup.py
test/aaa_profiling/test_memusage.py
test/perf/compiled_extensions/base.py
test/perf/compiled_extensions/command.py
test/perf/compiled_extensions/result.py
test/profiles.txt
tools/cython_imports.py

diff --git a/lib/sqlalchemy/engine/_result_cy.py b/lib/sqlalchemy/engine/_result_cy.py
new file mode 100644
index 0000000..7bd3901
--- /dev/null
+++ b/lib/sqlalchemy/engine/_result_cy.py
@@ -0,0 +1,633 @@
+# engine/_result_cy.py
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: https://www.opensource.org/licenses/mit-license.php
+# mypy: disable-error-code="misc,no-redef,type-arg,untyped-decorator"
+from __future__ import annotations
+
+from collections.abc import Callable
+from collections.abc import Iterator
+from collections.abc import Sequence
+from enum import Enum
+import operator
+from typing import Any
+from typing import Generic
+from typing import Literal
+from typing import overload
+from typing import TYPE_CHECKING
+from typing import TypeVar
+from typing import Union
+
+from .row import Row
+from .row import RowMapping
+from .. import exc
+from ..util import HasMemoized_ro_memoized_attribute
+from ..util.typing import Self
+from ..util.typing import TupleAny
+from ..util.typing import Unpack
+
+if TYPE_CHECKING:
+    from .result import _ProcessorsType
+    from .result import Result
+    from .result import ResultMetaData
+
+# START GENERATED CYTHON IMPORT
+# This section is automatically generated by the script tools/cython_imports.py
+try:
+    # NOTE: the cython compiler needs this "import cython" in the file, it
+    # can't be only "from sqlalchemy.util import cython" with the fallback
+    # in that module
+    import cython
+except ModuleNotFoundError:
+    from sqlalchemy.util import cython
+
+
+def _is_compiled() -> bool:
+    """Utility function to indicate if this module is compiled or not."""
+    return cython.compiled  # type: ignore[no-any-return,unused-ignore]
+
+
+# END GENERATED CYTHON IMPORT
+
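+# When compiled, these CPython C-API helpers let result lists/tuples be
+# preallocated at their final size and filled in place, avoiding the
+# overhead of append()-based construction.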
+if cython.compiled:
+    from cython.cimports.cpython import Py_INCREF
+    from cython.cimports.cpython import PyList_New
+    from cython.cimports.cpython import PyList_SET_ITEM
+    from cython.cimports.cpython import PyTuple_New
+    from cython.cimports.cpython import PyTuple_SET_ITEM
+
+
+_RowData = Row[Unpack[TupleAny]] | RowMapping | Any
+"""A generic form of "row" that accommodates for the different kinds of
+"rows" that different result objects return, including row, row mapping, and
+scalar values"""
+_R = TypeVar("_R", bound=_RowData)
+_T = TypeVar("_T", bound=Any)
+
+_InterimRowType = Union[_R, TupleAny]
+"""a catchall "anything" kind of return type that can be applied
+across all the result types
+
+"""
+
+_UniqueFilterType = Callable[[Any], Any]
+_UniqueFilterStateType = tuple[set[Any], _UniqueFilterType | None]
+
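+# Row-construction strategies selected by _row_getter(): build plain
+# rows, wrap scalar source values into 1-tuples, or run each row
+# through a tuple filter first.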
+_FLAG_SIMPLE = cython.declare(cython.char, 0)
+_FLAG_SCALAR_TO_TUPLE = cython.declare(cython.char, 1)
+_FLAG_TUPLE_FILTER = cython.declare(cython.char, 2)
+
+
+# a symbol that indicates to internal Result methods that
+# "no row is returned".  We can't use None for those cases where a scalar
+# filter is applied to rows.
+class _NoRow(Enum):
+    _NO_ROW = 0
+
+
+_NO_ROW = _NoRow._NO_ROW
+
+
+class BaseResultInternal(Generic[_R]):
+    __slots__ = ()
+
+    _real_result: Result[Unpack[TupleAny]] | None = None
+    _generate_rows: bool = True
+    _row_logging_fn: Callable[[Any], Any] | None
+
+    _unique_filter_state: _UniqueFilterStateType | None = None
+    _post_creational_filter: Callable[[Any], Any] | None = None
+
+    _metadata: ResultMetaData
+
+    _source_supports_scalars: bool
+    _yield_per: int | None
+
+    def _fetchiter_impl(
+        self,
+    ) -> Iterator[_InterimRowType[Row[Unpack[TupleAny]]]]:
+        raise NotImplementedError()
+
+    def _fetchone_impl(
+        self, hard_close: bool = False
+    ) -> _InterimRowType[Row[Unpack[TupleAny]]] | None:
+        raise NotImplementedError()
+
+    def _fetchmany_impl(
+        self, size: int | None = None
+    ) -> list[_InterimRowType[Row[Unpack[TupleAny]]]]:
+        raise NotImplementedError()
+
+    def _fetchall_impl(
+        self,
+    ) -> list[_InterimRowType[Row[Unpack[TupleAny]]]]:
+        raise NotImplementedError()
+
+    def _soft_close(self, hard: bool = False) -> None:
+        raise NotImplementedError()
+
+    @HasMemoized_ro_memoized_attribute
+    def _row_getter(
+        self,
+    ) -> tuple[Callable[..., _R] | None, Callable[..., Sequence[_R]] | None]:
+        real_result = self if self._real_result is None else self._real_result
+
+        metadata = self._metadata
+        tuple_filters = metadata._tuplefilter
+        flag: cython.char = _FLAG_SIMPLE
+
+        if real_result._source_supports_scalars:
+            if not self._generate_rows:
+                return None, None
+            else:
+                flag = _FLAG_SCALAR_TO_TUPLE
+        elif tuple_filters is not None:
+            flag = _FLAG_TUPLE_FILTER
+
+        processors: tuple
+        proc_valid: tuple
+
+        if metadata._effective_processors is not None:
+            ep = metadata._effective_processors
+            if flag == _FLAG_TUPLE_FILTER:
+                ep = tuple_filters(ep)
+
+            processors = tuple(ep)
+            proc_valid = tuple(
+                [i for i, p in enumerate(processors) if p is not None]
+            )
+        else:
+            processors = ()
+            proc_valid = ()
+
+        proc_size: cython.Py_ssize_t = len(processors)
+        log_row = real_result._row_logging_fn
+        has_log_row: cython.bint = log_row is not None
+
+        key_to_index = metadata._key_to_index
+        _Row = Row
+
+        if flag == _FLAG_SIMPLE and proc_size == 0 and not has_log_row:
+            # just build the rows
+
+            def single_row_simple(input_row: Sequence[Any], /) -> Row:
+                return _Row(metadata, None, key_to_index, input_row)
+
+            if cython.compiled:
+
+                def many_rows_simple(rows: Sequence[Any], /) -> list[Any]:
+                    size: cython.Py_ssize_t = len(rows)
+                    i: cython.Py_ssize_t
+                    result: list = PyList_New(size)
+                    for i in range(size):
+                        row: object = _Row(
+                            metadata, None, key_to_index, rows[i]
+                        )
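+                        # PyList_SET_ITEM steals a reference, so the
+                        # row must be increfed before being stored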
+                        Py_INCREF(row)
+                        PyList_SET_ITEM(result, i, row)
+                    return result
+
+            else:
+
+                def many_rows_simple(rows: Sequence[Any], /) -> list[Any]:
+                    return [
+                        _Row(metadata, None, key_to_index, row) for row in rows
+                    ]
+
+            return single_row_simple, many_rows_simple  # type: ignore[return-value] # noqa: E501
+
+        first_row: cython.bint = True
+
+        def single_row(input_row: Sequence[Any], /) -> Row:
+            nonlocal first_row
+
+            if flag == _FLAG_SCALAR_TO_TUPLE:
+                input_row = (input_row,)
+            elif flag == _FLAG_TUPLE_FILTER:
+                input_row = tuple_filters(input_row)
+
+            if proc_size != 0:
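+                # validate processor/row alignment once, on the first
+                # row only, to keep per-row overhead low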
+                if first_row:
+                    first_row = False
+                    assert len(input_row) == proc_size
+                input_row = _apply_processors(
+                    processors, proc_size, proc_valid, input_row
+                )
+
+            row: Row = _Row(metadata, None, key_to_index, input_row)
+            if has_log_row:
+                row = log_row(row)
+            return row
+
+        if cython.compiled:
+
+            def many_rows(rows: Sequence[Any], /) -> list[Any]:
+                size: cython.Py_ssize_t = len(rows)
+                i: cython.Py_ssize_t
+                result: list = PyList_New(size)
+                for i in range(size):
+                    row: object = single_row(rows[i])
+                    Py_INCREF(row)
+                    PyList_SET_ITEM(result, i, row)
+                return result
+
+        else:
+
+            def many_rows(rows: Sequence[Any], /) -> list[Any]:
+                return [single_row(row) for row in rows]
+
+        return single_row, many_rows  # type: ignore[return-value]
+
+    @HasMemoized_ro_memoized_attribute
+    def _iterator_getter(self) -> Callable[[], Iterator[_R]]:
+        make_row = self._row_getter[0]
+
+        post_creational_filter = self._post_creational_filter
+
+        if self._unique_filter_state is not None:
+            uniques: set
+            uniques, strategy = self._unique_strategy
+
+            def iterrows() -> Iterator[_R]:
+                for raw_row in self._fetchiter_impl():
+                    row = (
+                        make_row(raw_row) if make_row is not None else raw_row
+                    )
+                    hashed = strategy(row) if strategy is not None else row
+                    if hashed in uniques:
+                        continue
+                    uniques.add(hashed)
+                    if post_creational_filter is not None:
+                        row = post_creational_filter(row)
+                    yield row
+
+        else:
+
+            def iterrows() -> Iterator[_R]:
+                for raw_row in self._fetchiter_impl():
+                    row = (
+                        make_row(raw_row) if make_row is not None else raw_row
+                    )
+                    if post_creational_filter is not None:
+                        row = post_creational_filter(row)
+                    yield row
+
+        return iterrows
+
+    def _raw_all_rows(self) -> Sequence[_R]:
+        make_rows = self._row_getter[1]
+        assert make_rows is not None
+        return make_rows(self._fetchall_impl())
+
+    def _allrows(self) -> Sequence[_R]:
+        post_creational_filter = self._post_creational_filter
+
+        make_rows = self._row_getter[1]
+
+        rows = self._fetchall_impl()
+        made_rows: Sequence[_InterimRowType[_R]]
+        if make_rows is not None:
+            made_rows = make_rows(rows)
+        else:
+            made_rows = rows
+
+        interim_rows: Sequence[_R]
+
+        if self._unique_filter_state is not None:
+            uniques: set
+            uniques, strategy = self._unique_strategy
+            interim_rows = _apply_unique_strategy(
+                made_rows, [], uniques, strategy
+            )
+        else:
+            interim_rows = made_rows  # type: ignore
+
+        if post_creational_filter is not None:
+            interim_rows = [
+                post_creational_filter(row) for row in interim_rows
+            ]
+        return interim_rows
+
+    @HasMemoized_ro_memoized_attribute
+    def _onerow_getter(
+        self,
+    ) -> Callable[[Self], Literal[_NoRow._NO_ROW] | _R]:
+        make_row = self._row_getter[0]
+
+        post_creational_filter = self._post_creational_filter
+
+        if self._unique_filter_state is not None:
+            uniques: set
+            uniques, strategy = self._unique_strategy
+
+            def onerow(self: Self) -> Literal[_NoRow._NO_ROW] | _R:
+                while True:
+                    row = self._fetchone_impl()
+                    if row is None:
+                        return _NO_ROW
+                    else:
+                        obj: _InterimRowType[Any] = (
+                            make_row(row) if make_row is not None else row
+                        )
+                        hashed = strategy(obj) if strategy is not None else obj
+                        if hashed in uniques:
+                            continue
+                        uniques.add(hashed)
+                        if post_creational_filter is not None:
+                            obj = post_creational_filter(obj)
+                        return obj  # type: ignore
+
+        else:
+
+            def onerow(self: Self) -> Literal[_NoRow._NO_ROW] | _R:
+                row = self._fetchone_impl()
+                if row is None:
+                    return _NO_ROW
+                else:
+                    interim_row: _InterimRowType[Any] = (
+                        make_row(row) if make_row is not None else row
+                    )
+                    if post_creational_filter is not None:
+                        interim_row = post_creational_filter(interim_row)
+                    return interim_row  # type: ignore
+
+        return onerow
+
+    @HasMemoized_ro_memoized_attribute
+    def _manyrow_getter(self) -> Callable[[Self, int | None], Sequence[_R]]:
+        make_rows = self._row_getter[1]
+        real_result = self if self._real_result is None else self._real_result
+        yield_per = real_result._yield_per
+
+        post_creational_filter = self._post_creational_filter
+
+        if self._unique_filter_state:
+            uniques: set
+            uniques, strategy = self._unique_strategy
+
+            def manyrows(self: Self, num: int | None, /) -> Sequence[_R]:
+                made_rows: Sequence[Any]
+                collect: list[_R] = []
+
+                _manyrows = self._fetchmany_impl
+
+                if num is None:
+                    # if None is passed, we don't know the default
+                    # manyrows number, DBAPI has this as cursor.arraysize
+                    # different DBAPIs / fetch strategies may be different.
+                    # do a fetch to find what the number is.  if there are
+                    # only fewer rows left, then it doesn't matter.
+                    if yield_per:
+                        num_required = num = yield_per
+                    else:
+                        rows = _manyrows()
+                        num = len(rows)
+                        made_rows = (
+                            rows if make_rows is None else make_rows(rows)
+                        )
+                        _apply_unique_strategy(
+                            made_rows, collect, uniques, strategy
+                        )
+                        num_required = num - len(collect)
+                else:
+                    num_required = num
+
+                assert num is not None
+
+                while num_required:
+                    rows = _manyrows(num_required)
+                    if not rows:
+                        break
+
+                    made_rows = rows if make_rows is None else make_rows(rows)
+                    _apply_unique_strategy(
+                        made_rows, collect, uniques, strategy
+                    )
+                    num_required = num - len(collect)
+
+                if post_creational_filter is not None:
+                    collect = [post_creational_filter(row) for row in collect]
+                return collect
+
+        else:
+
+            def manyrows(self: Self, num: int | None, /) -> Sequence[_R]:
+                if num is None:
+                    num = yield_per
+
+                rows: Sequence = self._fetchmany_impl(num)
+                if make_rows is not None:
+                    rows = make_rows(rows)
+                if post_creational_filter is not None:
+                    rows = [post_creational_filter(row) for row in rows]
+                return rows
+
+        return manyrows
+
+    @overload
+    def _only_one_row(
+        self: BaseResultInternal[Row[_T, Unpack[TupleAny]]],
+        raise_for_second_row: bool,
+        raise_for_none: bool,
+        scalar: Literal[True],
+    ) -> _T: ...
+
+    @overload
+    def _only_one_row(
+        self,
+        raise_for_second_row: bool,
+        raise_for_none: Literal[True],
+        scalar: bool,
+    ) -> _R: ...
+
+    @overload
+    def _only_one_row(
+        self,
+        raise_for_second_row: bool,
+        raise_for_none: bool,
+        scalar: bool,
+    ) -> _R | None: ...
+
+    def _only_one_row(
+        self,
+        raise_for_second_row: bool,
+        raise_for_none: bool,
+        scalar: bool,
+    ) -> _R | None:
+        onerow = self._fetchone_impl
+
+        row = onerow(hard_close=True)
+        if row is None:
+            if raise_for_none:
+                raise exc.NoResultFound(
+                    "No row was found when one was required"
+                )
+            else:
+                return None
+
+        if scalar and self._source_supports_scalars:
+            self._generate_rows = False
+            make_row = None
+        else:
+            make_row = self._row_getter[0]
+
+        try:
+            row = make_row(row) if make_row is not None else row  # type: ignore[assignment] # noqa: E501
+        except:
+            self._soft_close(hard=True)
+            raise
+
+        if raise_for_second_row:
+            if self._unique_filter_state:
+                # for no second row but uniqueness, need to essentially
+                # consume the entire result :(
+                strategy = self._unique_strategy[1]
+
+                existing_row_hash = (
+                    strategy(row) if strategy is not None else row
+                )
+
+                while True:
+                    next_row: Any = onerow(hard_close=True)
+                    if next_row is None:
+                        next_row = _NO_ROW
+                        break
+
+                    try:
+                        next_row = (
+                            make_row(next_row)
+                            if make_row is not None
+                            else next_row
+                        )
+
+                        if strategy is not None:
+                            # assert next_row is not _NO_ROW
+                            if existing_row_hash == strategy(next_row):
+                                continue
+                        elif row == next_row:
+                            continue
+                        # here, we have a row and it's different
+                        break
+                    except:
+                        self._soft_close(hard=True)
+                        raise
+            else:
+                next_row = onerow(hard_close=True)
+                if next_row is None:
+                    next_row = _NO_ROW
+
+            if next_row is not _NO_ROW:
+                self._soft_close(hard=True)
+                raise exc.MultipleResultsFound(
+                    "Multiple rows were found when exactly one was required"
+                    if raise_for_none
+                    else "Multiple rows were found when one or none "
+                    "was required"
+                )
+        else:
+            next_row = _NO_ROW
+            # if we checked for second row then that would have
+            # closed us :)
+            self._soft_close(hard=True)
+
+        if not scalar:
+            post_creational_filter = self._post_creational_filter
+            if post_creational_filter is not None:
+                row = post_creational_filter(row)
+
+        if scalar and make_row is not None:
+            return row[0]  # type: ignore
+        else:
+            return row  # type: ignore
+
+    def _iter_impl(self) -> Iterator[_R]:
+        return self._iterator_getter()
+
+    def _next_impl(self) -> _R:
+        row = self._onerow_getter(self)
+        if row is _NO_ROW:
+            raise StopIteration()
+        else:
+            return row
+
+    @HasMemoized_ro_memoized_attribute
+    def _unique_strategy(self) -> _UniqueFilterStateType:
+        assert self._unique_filter_state is not None
+        uniques, strategy = self._unique_filter_state
+
+        if strategy is None and self._metadata._unique_filters is not None:
+            real_result = (
+                self if self._real_result is None else self._real_result
+            )
+            if (
+                real_result._source_supports_scalars
+                and not self._generate_rows
+            ):
+                strategy = self._metadata._unique_filters[0]
+            else:
+                filters = self._metadata._unique_filters
+                if self._metadata._tuplefilter is not None:
+                    filters = self._metadata._tuplefilter(filters)
+
+                strategy = operator.methodcaller("_filter_on_values", filters)
+        return uniques, strategy
+
+
+if cython.compiled:
+
+    @cython.inline
+    @cython.cfunc
+    @cython.wraparound(False)
+    @cython.boundscheck(False)
+    def _apply_processors(
+        proc: tuple,
+        proc_size: cython.Py_ssize_t,
+        proc_valid: object,  # used only by python impl
+        data: Sequence,
+    ) -> tuple[Any, ...]:
+        res: tuple = PyTuple_New(proc_size)
+        i: cython.Py_ssize_t
+        for i in range(proc_size):
+            p = proc[i]
+            if p is not None:
+                value = p(data[i])
+            else:
+                value = data[i]
+            Py_INCREF(value)
+            PyTuple_SET_ITEM(res, i, value)
+        return res
+
+else:
+
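+    # Pure-Python fallback: apply only the processors recorded as
+    # non-None in proc_valid, copying the remaining values unchanged.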
+    def _apply_processors(
+        proc: _ProcessorsType,
+        proc_size: int,  # used only by cython impl
+        proc_valid: tuple[int, ...],
+        data: Sequence[Any],
+    ) -> tuple[Any, ...]:
+        res = list(data)
+        for i in proc_valid:
+            res[i] = proc[i](res[i])
+        return tuple(res)
+
+
+@cython.inline
+@cython.cfunc
+def _apply_unique_strategy(
+    rows: Sequence[Any],
+    destination: list[Any],
+    uniques: set[Any],
+    strategy: Callable[[Any], Any] | None,
+) -> list[Any]:
+    i: cython.Py_ssize_t
+    has_strategy: cython.bint = strategy is not None
+    for i in range(len(rows)):
+        row = rows[i]
+        hashed = strategy(row) if has_strategy else row
+        if hashed in uniques:
+            continue
+        uniques.add(hashed)
+        destination.append(row)
+    return destination
diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py
index be4ec643c1f28de0550b1cc30fbffb1c07fb374f..1f1166399674d8cd6d8caffdb33118636d1afbbb 100644
--- a/lib/sqlalchemy/engine/cursor.py
+++ b/lib/sqlalchemy/engine/cursor.py
@@ -12,7 +12,6 @@
 from __future__ import annotations
 
 import collections
-import functools
 import operator
 import typing
 from typing import Any
@@ -1657,30 +1656,7 @@ class CursorResult(Result[Unpack[_Ts]]):
         )
 
         if cursor_description is not None:
-            # inline of Result._row_getter(), set up an initial row
-            # getter assuming no transformations will be called as this
-            # is the most common case
-
-            metadata = self._init_metadata(context, cursor_description)
-
-            _make_row: Any
-            proc = metadata._effective_processors
-            tf = metadata._tuplefilter
-            _make_row = functools.partial(
-                Row,
-                metadata,
-                proc if tf is None or proc is None else tf(proc),
-                metadata._key_to_index,
-            )
-            if tf is not None:
-                _fixed_tf = tf  # needed to make mypy happy...
-
-                def _sliced_row(raw_data: Any) -> Any:
-                    return _make_row(_fixed_tf(raw_data))
-
-                sliced_row = _sliced_row
-            else:
-                sliced_row = _make_row
+            self._init_metadata(context, cursor_description)
 
             if echo:
                 log = self.context.connection._log_debug
@@ -1691,13 +1667,8 @@ class CursorResult(Result[Unpack[_Ts]]):
 
                 self._row_logging_fn = _log_row
 
-                def _make_row_2(row: Any) -> Any:
-                    return _log_row(sliced_row(row))
-
-                make_row = _make_row_2
-            else:
-                make_row = sliced_row  # type: ignore[assignment]
-            self._set_memoized_attribute("_row_getter", make_row)
+            # call Result._row_getter to set up the row factory
+            self._row_getter
 
         else:
             assert context._num_sentinel_cols == 0
diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py
index 844db160f6a5c4700019cdcc958afa50023f2ddc..05f7df7c5a8059a7ad958e0c679ac914cf7ef922 100644
--- a/lib/sqlalchemy/engine/result.py
+++ b/lib/sqlalchemy/engine/result.py
@@ -9,7 +9,6 @@
 
 from __future__ import annotations
 
-from enum import Enum
 import functools
 import itertools
 import operator
@@ -28,22 +27,25 @@ from typing import NoReturn
 from typing import Optional
 from typing import overload
 from typing import Sequence
-from typing import Set
 from typing import Tuple
 from typing import TYPE_CHECKING
-from typing import TypeVar
 from typing import Union
 
+from ._result_cy import _InterimRowType
+from ._result_cy import _NO_ROW as _NO_ROW
+from ._result_cy import _R as _R
+from ._result_cy import _RowData
+from ._result_cy import _T
+from ._result_cy import _UniqueFilterType as _UniqueFilterType
+from ._result_cy import BaseResultInternal
 from ._util_cy import tuplegetter as tuplegetter
 from .row import Row
 from .row import RowMapping
 from .. import exc
 from .. import util
 from ..sql.base import _generative
-from ..sql.base import HasMemoized
 from ..sql.base import InPlaceGenerative
 from ..util import deprecated
-from ..util import HasMemoized_ro_memoized_attribute
 from ..util import NONE_SET
 from ..util.typing import Self
 from ..util.typing import TupleAny
@@ -63,28 +65,13 @@ _KeyMapRecType = Any
 _KeyMapType = Mapping[_KeyType, _KeyMapRecType]
 
 
-_RowData = Union[Row[Unpack[TupleAny]], RowMapping, Any]
-"""A generic form of "row" that accommodates for the different kinds of
-"rows" that different result objects return, including row, row mapping, and
-scalar values"""
-
-
-_R = TypeVar("_R", bound=_RowData)
-_T = TypeVar("_T", bound=Any)
 _Ts = TypeVarTuple("_Ts")
 
-_InterimRowType = Union[_R, TupleAny]
-"""a catchall "anything" kind of return type that can be applied
-across all the result types
-
-"""
 
 _InterimSupportsScalarsRowType = Union[Row[Unpack[TupleAny]], Any]
 
 _ProcessorsType = Sequence[Optional["_ResultProcessorType[Any]"]]
 _TupleGetterType = Callable[[Sequence[Any]], Sequence[Any]]
-_UniqueFilterType = Callable[[Any], Any]
-_UniqueFilterStateType = Tuple[Set[Any], Optional[_UniqueFilterType]]
 
 
 class ResultMetaData:
@@ -391,456 +378,10 @@ def result_tuple(
     )
 
 
-# a symbol that indicates to internal Result methods that
-# "no row is returned".  We can't use None for those cases where a scalar
-# filter is applied to rows.
-class _NoRow(Enum):
-    _NO_ROW = 0
-
-
-_NO_ROW = _NoRow._NO_ROW
-
-
-class ResultInternal(InPlaceGenerative, Generic[_R]):
+class ResultInternal(InPlaceGenerative, BaseResultInternal[_R]):
     __slots__ = ()
-
-    _real_result: Optional[Result[Unpack[TupleAny]]] = None
-    _generate_rows: bool = True
-    _row_logging_fn: Optional[Callable[[Any], Any]]
-
-    _unique_filter_state: Optional[_UniqueFilterStateType] = None
-    _post_creational_filter: Optional[Callable[[Any], Any]] = None
     _is_cursor = False
 
-    _metadata: ResultMetaData
-
-    _source_supports_scalars: bool
-
-    def _fetchiter_impl(
-        self,
-    ) -> Iterator[_InterimRowType[Row[Unpack[TupleAny]]]]:
-        raise NotImplementedError()
-
-    def _fetchone_impl(
-        self, hard_close: bool = False
-    ) -> Optional[_InterimRowType[Row[Unpack[TupleAny]]]]:
-        raise NotImplementedError()
-
-    def _fetchmany_impl(
-        self, size: Optional[int] = None
-    ) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]:
-        raise NotImplementedError()
-
-    def _fetchall_impl(
-        self,
-    ) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]:
-        raise NotImplementedError()
-
-    def _soft_close(self, hard: bool = False) -> None:
-        raise NotImplementedError()
-
-    @HasMemoized_ro_memoized_attribute
-    def _row_getter(self) -> Optional[Callable[..., _R]]:
-        real_result: Result[Unpack[TupleAny]] = (
-            self._real_result
-            if self._real_result
-            else cast("Result[Unpack[TupleAny]]", self)
-        )
-
-        if real_result._source_supports_scalars:
-            if not self._generate_rows:
-                return None
-            else:
-                _proc = Row
-
-                def process_row(
-                    metadata: ResultMetaData,
-                    processors: Optional[_ProcessorsType],
-                    key_to_index: Dict[_KeyType, int],
-                    scalar_obj: Any,
-                ) -> Row[Unpack[TupleAny]]:
-                    return _proc(
-                        metadata, processors, key_to_index, (scalar_obj,)
-                    )
-
-        else:
-            process_row = Row  # type: ignore
-
-        metadata = self._metadata
-
-        key_to_index = metadata._key_to_index
-        processors = metadata._effective_processors
-        tf = metadata._tuplefilter
-
-        if tf and not real_result._source_supports_scalars:
-            if processors:
-                processors = tf(processors)
-
-            _make_row_orig: Callable[..., _R] = functools.partial(  # type: ignore  # noqa E501
-                process_row, metadata, processors, key_to_index
-            )
-
-            fixed_tf = tf
-
-            def make_row(row: _InterimRowType[Row[Unpack[TupleAny]]]) -> _R:
-                return _make_row_orig(fixed_tf(row))
-
-        else:
-            make_row = functools.partial(  # type: ignore
-                process_row, metadata, processors, key_to_index
-            )
-
-        if real_result._row_logging_fn:
-            _log_row = real_result._row_logging_fn
-            _make_row = make_row
-
-            def make_row(row: _InterimRowType[Row[Unpack[TupleAny]]]) -> _R:
-                return _log_row(_make_row(row))  # type: ignore
-
-        return make_row
-
-    @HasMemoized_ro_memoized_attribute
-    def _iterator_getter(self) -> Callable[..., Iterator[_R]]:
-        make_row = self._row_getter
-
-        post_creational_filter = self._post_creational_filter
-
-        if self._unique_filter_state:
-            uniques, strategy = self._unique_strategy
-
-            def iterrows(self: Result[Unpack[TupleAny]]) -> Iterator[_R]:
-                for raw_row in self._fetchiter_impl():
-                    obj: _InterimRowType[Any] = (
-                        make_row(raw_row) if make_row else raw_row
-                    )
-                    hashed = strategy(obj) if strategy else obj
-                    if hashed in uniques:
-                        continue
-                    uniques.add(hashed)
-                    if post_creational_filter:
-                        obj = post_creational_filter(obj)
-                    yield obj  # type: ignore
-
-        else:
-
-            def iterrows(self: Result[Unpack[TupleAny]]) -> Iterator[_R]:
-                for raw_row in self._fetchiter_impl():
-                    row: _InterimRowType[Any] = (
-                        make_row(raw_row) if make_row else raw_row
-                    )
-                    if post_creational_filter:
-                        row = post_creational_filter(row)
-                    yield row  # type: ignore
-
-        return iterrows
-
-    def _raw_all_rows(self) -> List[_R]:
-        make_row = self._row_getter
-        assert make_row is not None
-        rows = self._fetchall_impl()
-        return [make_row(row) for row in rows]
-
-    def _allrows(self) -> List[_R]:
-        post_creational_filter = self._post_creational_filter
-
-        make_row = self._row_getter
-
-        rows = self._fetchall_impl()
-        made_rows: List[_InterimRowType[_R]]
-        if make_row:
-            made_rows = [make_row(row) for row in rows]
-        else:
-            made_rows = rows  # type: ignore
-
-        interim_rows: List[_R]
-
-        if self._unique_filter_state:
-            uniques, strategy = self._unique_strategy
-
-            interim_rows = [
-                made_row  # type: ignore
-                for made_row, sig_row in [
-                    (
-                        made_row,
-                        strategy(made_row) if strategy else made_row,
-                    )
-                    for made_row in made_rows
-                ]
-                if sig_row not in uniques and not uniques.add(sig_row)  # type: ignore # noqa: E501
-            ]
-        else:
-            interim_rows = made_rows  # type: ignore
-
-        if post_creational_filter:
-            interim_rows = [
-                post_creational_filter(row) for row in interim_rows
-            ]
-        return interim_rows
-
-    @HasMemoized_ro_memoized_attribute
-    def _onerow_getter(
-        self,
-    ) -> Callable[..., Union[Literal[_NoRow._NO_ROW], _R]]:
-        make_row = self._row_getter
-
-        post_creational_filter = self._post_creational_filter
-
-        if self._unique_filter_state:
-            uniques, strategy = self._unique_strategy
-
-            def onerow(self: Result[Unpack[TupleAny]]) -> Union[_NoRow, _R]:
-                _onerow = self._fetchone_impl
-                while True:
-                    row = _onerow()
-                    if row is None:
-                        return _NO_ROW
-                    else:
-                        obj: _InterimRowType[Any] = (
-                            make_row(row) if make_row else row
-                        )
-                        hashed = strategy(obj) if strategy else obj
-                        if hashed in uniques:
-                            continue
-                        else:
-                            uniques.add(hashed)
-                        if post_creational_filter:
-                            obj = post_creational_filter(obj)
-                        return obj  # type: ignore
-
-        else:
-
-            def onerow(self: Result[Unpack[TupleAny]]) -> Union[_NoRow, _R]:
-                row = self._fetchone_impl()
-                if row is None:
-                    return _NO_ROW
-                else:
-                    interim_row: _InterimRowType[Any] = (
-                        make_row(row) if make_row else row
-                    )
-                    if post_creational_filter:
-                        interim_row = post_creational_filter(interim_row)
-                    return interim_row  # type: ignore
-
-        return onerow
-
-    @HasMemoized_ro_memoized_attribute
-    def _manyrow_getter(self) -> Callable[..., List[_R]]:
-        make_row = self._row_getter
-
-        post_creational_filter = self._post_creational_filter
-
-        if self._unique_filter_state:
-            uniques, strategy = self._unique_strategy
-
-            def filterrows(
-                make_row: Optional[Callable[..., _R]],
-                rows: List[Any],
-                strategy: Optional[Callable[[List[Any]], Any]],
-                uniques: Set[Any],
-            ) -> List[_R]:
-                if make_row:
-                    rows = [make_row(row) for row in rows]
-
-                if strategy:
-                    made_rows = (
-                        (made_row, strategy(made_row)) for made_row in rows
-                    )
-                else:
-                    made_rows = ((made_row, made_row) for made_row in rows)
-                return [
-                    made_row
-                    for made_row, sig_row in made_rows
-                    if sig_row not in uniques and not uniques.add(sig_row)  # type: ignore  # noqa: E501
-                ]
-
-            def manyrows(
-                self: ResultInternal[_R], num: Optional[int]
-            ) -> List[_R]:
-                collect: List[_R] = []
-
-                _manyrows = self._fetchmany_impl
-
-                if num is None:
-                    # if None is passed, we don't know the default
-                    # manyrows number, DBAPI has this as cursor.arraysize
-                    # different DBAPIs / fetch strategies may be different.
-                    # do a fetch to find what the number is.  if there are
-                    # only fewer rows left, then it doesn't matter.
-                    real_result = (
-                        self._real_result
-                        if self._real_result
-                        else cast("Result[Unpack[TupleAny]]", self)
-                    )
-                    if real_result._yield_per:
-                        num_required = num = real_result._yield_per
-                    else:
-                        rows = _manyrows(num)
-                        num = len(rows)
-                        assert make_row is not None
-                        collect.extend(
-                            filterrows(make_row, rows, strategy, uniques)
-                        )
-                        num_required = num - len(collect)
-                else:
-                    num_required = num
-
-                assert num is not None
-
-                while num_required:
-                    rows = _manyrows(num_required)
-                    if not rows:
-                        break
-
-                    collect.extend(
-                        filterrows(make_row, rows, strategy, uniques)
-                    )
-                    num_required = num - len(collect)
-
-                if post_creational_filter:
-                    collect = [post_creational_filter(row) for row in collect]
-                return collect
-
-        else:
-
-            def manyrows(
-                self: ResultInternal[_R], num: Optional[int]
-            ) -> List[_R]:
-                if num is None:
-                    real_result = (
-                        self._real_result
-                        if self._real_result
-                        else cast("Result[Unpack[TupleAny]]", self)
-                    )
-                    num = real_result._yield_per
-
-                rows: List[_InterimRowType[Any]] = self._fetchmany_impl(num)
-                if make_row:
-                    rows = [make_row(row) for row in rows]
-                if post_creational_filter:
-                    rows = [post_creational_filter(row) for row in rows]
-                return rows  # type: ignore
-
-        return manyrows
-
-    @overload
-    def _only_one_row(
-        self: ResultInternal[Row[_T, Unpack[TupleAny]]],
-        raise_for_second_row: bool,
-        raise_for_none: bool,
-        scalar: Literal[True],
-    ) -> _T: ...
-
-    @overload
-    def _only_one_row(
-        self,
-        raise_for_second_row: bool,
-        raise_for_none: Literal[True],
-        scalar: bool,
-    ) -> _R: ...
-
-    @overload
-    def _only_one_row(
-        self,
-        raise_for_second_row: bool,
-        raise_for_none: bool,
-        scalar: bool,
-    ) -> Optional[_R]: ...
-
-    def _only_one_row(
-        self,
-        raise_for_second_row: bool,
-        raise_for_none: bool,
-        scalar: bool,
-    ) -> Optional[_R]:
-        onerow = self._fetchone_impl
-
-        row: Optional[_InterimRowType[Any]] = onerow(hard_close=True)
-        if row is None:
-            if raise_for_none:
-                raise exc.NoResultFound(
-                    "No row was found when one was required"
-                )
-            else:
-                return None
-
-        if scalar and self._source_supports_scalars:
-            self._generate_rows = False
-            make_row = None
-        else:
-            make_row = self._row_getter
-
-        try:
-            row = make_row(row) if make_row else row
-        except:
-            self._soft_close(hard=True)
-            raise
-
-        if raise_for_second_row:
-            if self._unique_filter_state:
-                # for no second row but uniqueness, need to essentially
-                # consume the entire result :(
-                uniques, strategy = self._unique_strategy
-
-                existing_row_hash = strategy(row) if strategy else row
-
-                while True:
-                    next_row: Any = onerow(hard_close=True)
-                    if next_row is None:
-                        next_row = _NO_ROW
-                        break
-
-                    try:
-                        next_row = make_row(next_row) if make_row else next_row
-
-                        if strategy:
-                            assert next_row is not _NO_ROW
-                            if existing_row_hash == strategy(next_row):
-                                continue
-                        elif row == next_row:
-                            continue
-                        # here, we have a row and it's different
-                        break
-                    except:
-                        self._soft_close(hard=True)
-                        raise
-            else:
-                next_row = onerow(hard_close=True)
-                if next_row is None:
-                    next_row = _NO_ROW
-
-            if next_row is not _NO_ROW:
-                self._soft_close(hard=True)
-                raise exc.MultipleResultsFound(
-                    "Multiple rows were found when exactly one was required"
-                    if raise_for_none
-                    else "Multiple rows were found when one or none "
-                    "was required"
-                )
-        else:
-            # if we checked for second row then that would have
-            # closed us :)
-            self._soft_close(hard=True)
-
-        if not scalar:
-            post_creational_filter = self._post_creational_filter
-            if post_creational_filter:
-                row = post_creational_filter(row)
-
-        if scalar and make_row:
-            return row[0]  # type: ignore
-        else:
-            return row  # type: ignore
-
-    def _iter_impl(self) -> Iterator[_R]:
-        return self._iterator_getter(self)
-
-    def _next_impl(self) -> _R:
-        row = self._onerow_getter(self)
-        if row is _NO_ROW:
-            raise StopIteration()
-        else:
-            return row
-
     @_generative
     def _column_slices(self, indexes: Sequence[_KeyIndexType]) -> Self:
         real_result = (
@@ -856,31 +397,6 @@ class ResultInternal(InPlaceGenerative, Generic[_R]):
 
         return self
 
-    @HasMemoized.memoized_attribute
-    def _unique_strategy(self) -> _UniqueFilterStateType:
-        assert self._unique_filter_state is not None
-        uniques, strategy = self._unique_filter_state
-
-        real_result = (
-            self._real_result
-            if self._real_result is not None
-            else cast("Result[Unpack[TupleAny]]", self)
-        )
-
-        if not strategy and self._metadata._unique_filters:
-            if (
-                real_result._source_supports_scalars
-                and not self._generate_rows
-            ):
-                strategy = self._metadata._unique_filters[0]
-            else:
-                filters = self._metadata._unique_filters
-                if self._metadata._tuplefilter:
-                    filters = self._metadata._tuplefilter(filters)
-
-                strategy = operator.methodcaller("_filter_on_values", filters)
-        return uniques, strategy
-
 
 class _WithKeys:
     __slots__ = ()
diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py
index 9f78711e5771318b43fc16abc470a0c47dec5724..ad874272db9e6528603eef406aa4d0a48a17d38a 100644
--- a/lib/sqlalchemy/orm/loading.py
+++ b/lib/sqlalchemy/orm/loading.py
@@ -292,7 +292,7 @@ def instances(
                 "against collections"
             )
 
-        result._unique_filter_state = (None, require_unique)
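+        # the first element is the set of already-seen hash values; it
+        # must start out as an empty set rather than None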
+        result._unique_filter_state = (set(), require_unique)
 
     if context.yield_per:
         result.yield_per(context.yield_per)
diff --git a/lib/sqlalchemy/util/_has_cython.py b/lib/sqlalchemy/util/_has_cython.py
index b34e27c159cc8b24154bce568805ca5998834859..bfd92a14531f15f28f704d0e562002f401fb6ccf 100644
--- a/lib/sqlalchemy/util/_has_cython.py
+++ b/lib/sqlalchemy/util/_has_cython.py
@@ -16,6 +16,7 @@ def _all_cython_modules():
     from . import _collections_cy
     from . import _immutabledict_cy
     from ..engine import _processors_cy
+    from ..engine import _result_cy
     from ..engine import _row_cy
     from ..engine import _util_cy as engine_util
     from ..sql import _util_cy as sql_util
@@ -24,6 +25,7 @@ def _all_cython_modules():
         _collections_cy,
         _immutabledict_cy,
         _processors_cy,
+        _result_cy,
         _row_cy,
         engine_util,
         sql_util,
diff --git a/lib/sqlalchemy/util/cython.py b/lib/sqlalchemy/util/cython.py
index 288e497650a96dbc7210ef3cac766421f946ec9c..e66742d46791c95abfdea17e7ae4f55e23570c9c 100644
--- a/lib/sqlalchemy/util/cython.py
+++ b/lib/sqlalchemy/util/cython.py
@@ -22,6 +22,7 @@ compiled = False
 
 # types
 int = int  # noqa: A001
+char = int
 bint = bool
 longlong = int
 ulonglong = int
diff --git a/setup.py b/setup.py
index 2adbae3222f0f76cdcac93d6452d6ec84b849020..8fa4dde279a4d08337f2efc4f56b6e0a5e9f0fda 100644
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@ if DISABLE_EXTENSION and REQUIRE_EXTENSION:
 CYTHON_MODULES = (
     "engine._processors_cy",
     "engine._row_cy",
+    "engine._result_cy",
     "engine._util_cy",
     "sql._util_cy",
     "util._collections_cy",
diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py
index f61dcfd5bd730d845a14b05d16a5c341d9a01d15..6223b86821121c4dfe67b95e28bc87dbb138c0e2 100644
--- a/test/aaa_profiling/test_memusage.py
+++ b/test/aaa_profiling/test_memusage.py
@@ -1507,7 +1507,7 @@ class CycleTest(_fixtures.FixtureTest):
 
         stmt = s.query(User).join(User.addresses).statement
 
-        @assert_cycles(4)
+        @assert_cycles(8)
         def go():
             result = s.execute(stmt)
             rows = result.fetchall()  # noqa
@@ -1522,7 +1522,7 @@ class CycleTest(_fixtures.FixtureTest):
 
         stmt = s.query(User).join(User.addresses).statement
 
-        @assert_cycles(4)
+        @assert_cycles(8)
         def go():
             result = s.execute(stmt)
             for partition in result.partitions(3):
@@ -1538,7 +1538,7 @@ class CycleTest(_fixtures.FixtureTest):
 
         stmt = s.query(User).join(User.addresses).statement
 
-        @assert_cycles(4)
+        @assert_cycles(8)
         def go():
             result = s.execute(stmt)
             for partition in result.unique().partitions(3):
diff --git a/test/perf/compiled_extensions/base.py b/test/perf/compiled_extensions/base.py
index ccf222437cfb67a2a695e669f0bf4b1a5c3de8ea..f64e45fd6971090230be4d0923cadd45541c33d2 100644
--- a/test/perf/compiled_extensions/base.py
+++ b/test/perf/compiled_extensions/base.py
@@ -1,5 +1,4 @@
 from collections import defaultdict
-import math
 import re
 from timeit import timeit
 
@@ -60,16 +59,11 @@ class Case:
     @classmethod
     def _divide_results(cls, results, num, div, name):
         "utility method to create ratios of two implementation"
-        avg_str = "> mean of values"
         if div in results and num in results:
             num_dict = results[num]
             div_dict = results[div]
-            assert avg_str not in num_dict and avg_str not in div_dict
             assert num_dict.keys() == div_dict.keys()
             results[name] = {m: num_dict[m] / div_dict[m] for m in div_dict}
-            not_na = [v for v in results[name].values() if not math.isnan(v)]
-            avg = sum(not_na) / len(not_na)
-            results[name][avg_str] = avg
 
     @classmethod
     def update_results(cls, results):
@@ -110,7 +104,7 @@ class Case:
                     value = timeit(call, number=t_num)
                     print(".", end="", flush=True)
                 except Exception as e:
-                    fails.append(f"{name}::{m} error: {e}")
+                    fails.append(f"{name}::{m} error: {e!r}")
                     print("x", end="", flush=True)
                     value = float("nan")
 
@@ -119,5 +113,7 @@ class Case:
             for f in fails:
                 print("\t", f)
 
+        before = set(results)
         cls.update_results(results)
-        return results, [name for name, _ in objects]
+        after = set(results)
+        return results, [name for name, _ in objects], after - before
diff --git a/test/perf/compiled_extensions/command.py b/test/perf/compiled_extensions/command.py
index 587a9127dcdd9fb750352b7df1d05479ab1b2463..39efdbb36b6fe502715db1ce39c2991496450d95 100644
--- a/test/perf/compiled_extensions/command.py
+++ b/test/perf/compiled_extensions/command.py
@@ -1,5 +1,6 @@
 from collections import defaultdict
 from datetime import datetime
+import math
 import subprocess
 
 import sqlalchemy as sa
@@ -146,22 +147,54 @@ def main():
         else:
             compare_by_meth = {}
 
-        result_by_impl, impl_names = case.run_case(args.factor, args.filter)
+        result_by_impl, impl_names, computed = case.run_case(
+            args.factor, args.filter
+        )
 
+        add_mean = set(computed)
+        add_sum = set(impl_names)
         result_by_method = defaultdict(dict)
         all_impls = dict.fromkeys(result_by_impl)
+        sum_str = "> sum of values"
+        avg_str = "> mean of values"
         for impl in result_by_impl:
             for meth in result_by_impl[impl]:
                 meth_dict = result_by_method[meth]
                 meth_dict[impl] = result_by_impl[impl][meth]
                 if meth in compare_by_meth and impl in compare_by_meth[meth]:
                     cmp_impl = f"compare {impl}"
+                    add_sum.add(cmp_impl)
                     over = f"{impl} / compare"
+                    add_mean.add(over)
                     all_impls[cmp_impl] = None
                     all_impls[over] = None
                     meth_dict[cmp_impl] = compare_by_meth[meth][impl]
                     meth_dict[over] = meth_dict[impl] / meth_dict[cmp_impl]
 
+        for impl in add_sum | add_mean:
+            if impl in result_by_impl:
+                data = result_by_impl[impl]
+            else:
+                key = next(iter(result_by_method))
+                assert impl in result_by_method[key]
+                data = {
+                    m: md[impl]
+                    for m, md in result_by_method.items()
+                    if impl in md
+                }
+
+            if impl in add_sum:
+                assert sum_str not in data
+                total = sum(data.values())
+                result_by_method[sum_str][impl] = total
+            if impl in add_mean:
+                assert avg_str not in data
+                not_na = [v for v in data.values() if not math.isnan(v)]
+                if not not_na:
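+                    # every value was NaN; skip to avoid dividing by zero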
+                    continue
+                avg = sum(not_na) / len(not_na)
+                result_by_method[avg_str][impl] = avg
+
         tabulate(list(all_impls), result_by_method)
 
         if args.csv:
diff --git a/test/perf/compiled_extensions/result.py b/test/perf/compiled_extensions/result.py
index b3f7145cb588b34faca90b081288e853f4f054ba..1dc1792e803d3c97af718137ae454dbeed8bc5a1 100644
--- a/test/perf/compiled_extensions/result.py
+++ b/test/perf/compiled_extensions/result.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 from dataclasses import dataclass
 from itertools import product
 from operator import itemgetter
+from types import FunctionType
 from typing import Callable
 from typing import Optional
 
@@ -11,11 +12,55 @@ from sqlalchemy.dialects import sqlite
 from sqlalchemy.engine import cursor
 from sqlalchemy.engine import result
 from sqlalchemy.engine.default import DefaultExecutionContext
+from sqlalchemy.util.langhelpers import load_uncompiled_module
 from .base import Case
 from .base import test_case
 
 
 class _CommonResult(Case):
+
+    @staticmethod
+    def _load_python_module():
+        from sqlalchemy.engine import _result_cy
+
+        py_result = load_uncompiled_module(_result_cy)
+        assert not py_result._is_compiled()
+        # NOTE: the enums must be copied over, otherwise they are not the
+        # same object and `is` comparisons fail
+        py_result._NoRow = _result_cy._NoRow
+        py_result._NO_ROW = _result_cy._NO_ROW
+        return py_result
+
+    @staticmethod
+    def _make_subclass(
+        name: str, result_internal: type, base: type[result.Result]
+    ) -> type[result.Result]:
+        # Need to also create a python version of the scalar result
+        class PyScalarResult(result_internal, result.ScalarResult):
+            _fetchiter_impl = result.ScalarResult._fetchiter_impl
+            _fetchone_impl = result.ScalarResult._fetchone_impl
+            _fetchmany_impl = result.ScalarResult._fetchmany_impl
+            _fetchall_impl = result.ScalarResult._fetchall_impl
+            _soft_close = result.ScalarResult._soft_close
+
+        def scalars(self, index=0):
+            return PyScalarResult(self, index)
+
+        cls_dict = dict(
+            _fetchiter_impl=base._fetchiter_impl,
+            _fetchone_impl=base._fetchone_impl,
+            _fetchmany_impl=base._fetchmany_impl,
+            _fetchall_impl=base._fetchall_impl,
+            _soft_close=base._soft_close,
+            scalars=scalars,
+        )
+
+        return type(name, (result_internal, base), cls_dict)
+
+    @classmethod
+    def update_results(cls, results):
+        cls._divide_results(results, "cython", "python", "cy / py")
+
     @classmethod
     def init_class(cls):
         # 3-col
@@ -173,10 +218,31 @@ class IteratorResult(_CommonResult):
     impl: result.IteratorResult
 
     @staticmethod
-    def default():
-        return cursor.IteratorResult
+    def python():
+        py_result = _CommonResult._load_python_module()
+
+        PyIteratorResult = _CommonResult._make_subclass(
+            "PyIteratorResult",
+            py_result.BaseResultInternal,
+            result.IteratorResult,
+        )
+
+        assert PyIteratorResult._allrows.__class__ is FunctionType
+        return PyIteratorResult
+
+    @staticmethod
+    def cython():
+        from sqlalchemy.engine import _result_cy
 
-    IMPLEMENTATIONS = {"default": default.__func__}
+        assert _result_cy._is_compiled()
+
+        assert result.IteratorResult._allrows.__class__ is not FunctionType
+        return result.IteratorResult
+
+    IMPLEMENTATIONS = {
+        "python": python.__func__,
+        "cython": cython.__func__,
+    }
 
     @classmethod
     def get_init_args_callable(
@@ -196,10 +262,30 @@ class CursorResult(_CommonResult):
     impl: cursor.CursorResult
 
     @staticmethod
-    def default():
+    def python():
+        py_result = _CommonResult._load_python_module()
+
+        PyCursorResult = _CommonResult._make_subclass(
+            "PyCursorResult",
+            py_result.BaseResultInternal,
+            cursor.CursorResult,
+        )
+
+        return PyCursorResult
+
+    @staticmethod
+    def cython():
+        from sqlalchemy.engine import _result_cy
+
+        assert _result_cy._is_compiled()
+
+        assert cursor.CursorResult._allrows.__class__ is not FunctionType
         return cursor.CursorResult
 
-    IMPLEMENTATIONS = {"default": default.__func__}
+    IMPLEMENTATIONS = {
+        "python": python.__func__,
+        "cython": cython.__func__,
+    }
 
     @classmethod
     def get_init_args_callable(
diff --git a/test/profiles.txt b/test/profiles.txt
index 36c1a204d865e9d5c246afa68bf5b7dbc7773747..4a2254196f5becd5adaaadc7753abc00c5876f10 100644
--- a/test/profiles.txt
+++ b/test/profiles.txt
@@ -310,16 +310,16 @@ test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching
 
 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline
 
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 15356
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 13312
 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 25384
-test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 15358
+test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 13347
 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 25386
 
 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols
 
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 21463
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 19455
 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 25491
-test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 21465
+test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 19454
 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 25493
 
 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased
@@ -542,15 +542,15 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_6
 
 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0]
 
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 18
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 17
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_nocextensions 18
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_nocextensions 14
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_cextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_nocextensions 19
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_nocextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 14
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_cextensions 18
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_nocextensions 18
@@ -560,21 +560,21 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_oracle_oracledb_dbapiunicode_nocextensions 19
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_cextensions 14
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_nocextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 14
 
 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1]
 
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 18
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 17
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_nocextensions 20
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_nocextensions 16
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_cextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_nocextensions 21
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_nocextensions 16
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 13
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 18
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_cextensions 18
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_nocextensions 20
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_mssql_pyodbc_dbapiunicode_cextensions 14
@@ -583,21 +583,21 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_oracle_oracledb_dbapiunicode_nocextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_cextensions 14
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_nocextensions 16
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 13
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 18
 
 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2]
 
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 18
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 17
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_nocextensions 20
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_nocextensions 16
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_cextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_nocextensions 21
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 14
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 13
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_nocextensions 16
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 13
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 18
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_cextensions 18
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_nocextensions 20
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_mssql_pyodbc_dbapiunicode_cextensions 14
@@ -606,21 +606,21 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_oracle_oracledb_dbapiunicode_nocextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_cextensions 14
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_nocextensions 16
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 14
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 13
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 18
 
 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1]
 
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_cextensions 23
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_mariadb_mysqldb_dbapiunicode_nocextensions 25
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_cextensions 16
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_mssql_pyodbc_dbapiunicode_nocextensions 19
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_cextensions 28
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_oracle_oracledb_dbapiunicode_nocextensions 27
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 17
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_cextensions 16
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_postgresql_psycopg2_dbapiunicode_nocextensions 19
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 17
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 19
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_cextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.13_sqlite_pysqlite_dbapiunicode_nocextensions 21
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_cextensions 23
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_mariadb_mysqldb_dbapiunicode_nocextensions 25
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_mssql_pyodbc_dbapiunicode_cextensions 17
@@ -629,8 +629,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_oracle_oracledb_dbapiunicode_nocextensions 27
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_cextensions 17
 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_postgresql_psycopg2_dbapiunicode_nocextensions 19
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 17
-test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 19
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_cextensions 16
+test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.14_sqlite_pysqlite_dbapiunicode_nocextensions 21
 
 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string
 
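Each profiles.txt line records the function-call count measured by the profiling fixture for one test on one platform/driver/extension combination, so the lowered ``cextensions`` numbers above quantify the calls eliminated by the compiled result internals. An illustrative back-of-the-envelope check, using the ``DeferOptionsTest.test_baseline`` values from this diff:

# Relative reduction in profiled call counts for one entry above:
before, after = 15356, 13312
print(f"{before - after} fewer calls ({(before - after) / before:.1%})")
# -> 2044 fewer calls (13.3%)
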
index 81778d6b5ad67a7b85004130db6e5228d7933f3f..f2dbaecc1687b46e899145a7c2ad87790e264e0b 100644
--- a/tools/cython_imports.py
+++ b/tools/cython_imports.py
@@ -45,10 +45,13 @@ def run_file(cmd: code_writer_cmd, file: Path):
 
     content = section_re.sub(repl_fn, content)
     if count == 0:
-        raise ValueError(
-            "Expected to find comment '# START GENERATED CYTHON IMPORT' "
-            f"in cython file {file}, but none found"
-        )
+        if content:
+            raise ValueError(
+                "Expected to find comment '# START GENERATED CYTHON IMPORT' "
+                f"in cython file {file}, but none found and the file is not "
+                "empty"
+            )
+        content = code
     if count > 1:
         raise ValueError(
             "Expected to find a single comment '# START GENERATED CYTHON "