👷 Add performance tests with CodSpeed (#14558)
author Sebastián Ramírez <tiangolo@gmail.com>
Thu, 18 Dec 2025 13:24:09 +0000 (05:24 -0800)
committer GitHub <noreply@github.com>
Thu, 18 Dec 2025 13:24:09 +0000 (14:24 +0100)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
.github/workflows/test.yml
.gitignore
requirements-tests.txt
tests/benchmarks/__init__.py [new file with mode: 0644]
tests/benchmarks/test_general_performance.py [new file with mode: 0644]

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8a839a928a8b70e8b0fcbf2d5e2c45c47e1f30df..cc906eaf6d4c45e7b92aa375a5450060410b2c65 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -66,6 +66,10 @@ jobs:
             python-version: "3.13"
             pydantic-version: "pydantic>=2.0.2,<3.0.0"
             coverage: coverage
+          - os: ubuntu-latest
+            python-version: "3.13"
+            pydantic-version: "pydantic>=2.0.2,<3.0.0"
+            coverage: coverage
           - os: ubuntu-latest
             python-version: "3.14"
             pydantic-version: "pydantic>=2.0.2,<3.0.0"
@@ -100,6 +104,15 @@ jobs:
         env:
           COVERAGE_FILE: coverage/.coverage.${{ runner.os }}-py${{ matrix.python-version }}
           CONTEXT: ${{ runner.os }}-py${{ matrix.python-version }}
+      - name: CodSpeed benchmarks
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.13' && matrix.pydantic-version == 'pydantic>=2.0.2,<3.0.0'
+        uses: CodSpeedHQ/action@v4
+        env:
+            COVERAGE_FILE: coverage/.coverage.${{ runner.os }}-py${{ matrix.python-version }}
+            CONTEXT: ${{ runner.os }}-py${{ matrix.python-version }}
+        with:
+          mode: simulation
+          run: coverage run -m pytest tests/ --codspeed
       # Do not store coverage for all possible combinations to avoid file size max errors in Smokeshow
       - name: Store coverage files
         if: matrix.coverage == 'coverage'
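
Note: the "CodSpeed benchmarks" step is gated by its `if:` expression to the ubuntu-latest / Python 3.13 / Pydantic v2 matrix combination, and its `run:` line reuses the regular test invocation (`coverage run -m pytest`) with the `--codspeed` flag appended. `mode: simulation` selects CodSpeed's simulated measurement mode, which produces deterministic measurements via CPU simulation rather than wall-clock timing.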
diff --git a/.gitignore b/.gitignore
index 6016ffa598a565a00842fc309c0dbb9392d7daa9..3dc12ca951f5d4cb8696adac604486c8beaba053 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,5 @@ archive.zip
 
 # Ignore while the setup still depends on requirements.txt files
 uv.lock
+
+.codspeed
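
The new `.gitignore` entry covers the `.codspeed` directory that pytest-codspeed can create for local benchmark artifacts when the suite is run outside CI.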
diff --git a/requirements-tests.txt b/requirements-tests.txt
index c5de4157e777af3f4f8ae3274def49809769df42..ee188b496cf7386f2701234a48afdf43d3e64f56 100644
--- a/requirements-tests.txt
+++ b/requirements-tests.txt
@@ -11,6 +11,7 @@ PyJWT==2.9.0
 pyyaml >=5.3.1,<7.0.0
 pwdlib[argon2] >=0.2.1
 inline-snapshot>=0.21.1
+pytest-codspeed==4.2.0
 # types
 types-ujson ==5.10.0.20240515
 types-orjson ==3.6.2
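
With the plugin pinned here, the benchmarks can be reproduced locally with `pip install pytest-codspeed==4.2.0` followed by `pytest tests/benchmarks --codspeed` (the same flag the CI step passes); without `--codspeed`, the new benchmark module skips itself at import time via the module-level guard shown below.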
diff --git a/tests/benchmarks/__init__.py b/tests/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/benchmarks/test_general_performance.py b/tests/benchmarks/test_general_performance.py
new file mode 100644
index 0000000..dca3613
--- /dev/null
+++ b/tests/benchmarks/test_general_performance.py
@@ -0,0 +1,396 @@
+import json
+import sys
+from collections.abc import Iterator
+from typing import Annotated, Any
+
+import pytest
+from fastapi import Depends, FastAPI
+from fastapi.testclient import TestClient
+
+if "--codspeed" not in sys.argv:
+    pytest.skip(
+        "Benchmark tests are skipped by default; run with --codspeed.",
+        allow_module_level=True,
+    )
+
+LARGE_ITEMS: list[dict[str, Any]] = [
+    {
+        "id": i,
+        "name": f"item-{i}",
+        "values": list(range(25)),
+        "meta": {
+            "active": True,
+            "group": i % 10,
+            "tag": f"t{i % 5}",
+        },
+    }
+    for i in range(300)
+]
+
+LARGE_METADATA: dict[str, Any] = {
+    "source": "benchmark",
+    "version": 1,
+    "flags": {"a": True, "b": False, "c": True},
+    "notes": ["x" * 50, "y" * 50, "z" * 50],
+}
+
+LARGE_PAYLOAD: dict[str, Any] = {"items": LARGE_ITEMS, "metadata": LARGE_METADATA}
+
+
+def dep_a():
+    return 40
+
+
+def dep_b(a: Annotated[int, Depends(dep_a)]):
+    return a + 2
+
+
+@pytest.fixture(
+    scope="module",
+    params=[
+        "pydantic-v2",
+        "pydantic-v1",
+    ],
+)
+def basemodel_class(request: pytest.FixtureRequest) -> type[Any]:
+    if request.param == "pydantic-v2":
+        from pydantic import BaseModel
+
+        return BaseModel
+    else:
+        from pydantic.v1 import BaseModel
+
+        return BaseModel
+
+
+@pytest.fixture(scope="module")
+def app(basemodel_class: type[Any]) -> FastAPI:
+    class ItemIn(basemodel_class):
+        name: str
+        value: int
+
+    class ItemOut(basemodel_class):
+        name: str
+        value: int
+        dep: int
+
+    class LargeIn(basemodel_class):
+        items: list[dict[str, Any]]
+        metadata: dict[str, Any]
+
+    class LargeOut(basemodel_class):
+        items: list[dict[str, Any]]
+        metadata: dict[str, Any]
+
+    app = FastAPI()
+
+    @app.post("/sync/validated", response_model=ItemOut)
+    def sync_validated(item: ItemIn, dep: Annotated[int, Depends(dep_b)]):
+        return ItemOut(name=item.name, value=item.value, dep=dep)
+
+    @app.get("/sync/dict-no-response-model")
+    def sync_dict_no_response_model():
+        return {"name": "foo", "value": 123}
+
+    @app.get("/sync/dict-with-response-model", response_model=ItemOut)
+    def sync_dict_with_response_model(
+        dep: Annotated[int, Depends(dep_b)],
+    ):
+        return {"name": "foo", "value": 123, "dep": dep}
+
+    @app.get("/sync/model-no-response-model")
+    def sync_model_no_response_model(dep: Annotated[int, Depends(dep_b)]):
+        return ItemOut(name="foo", value=123, dep=dep)
+
+    @app.get("/sync/model-with-response-model", response_model=ItemOut)
+    def sync_model_with_response_model(dep: Annotated[int, Depends(dep_b)]):
+        return ItemOut(name="foo", value=123, dep=dep)
+
+    @app.post("/async/validated", response_model=ItemOut)
+    async def async_validated(
+        item: ItemIn,
+        dep: Annotated[int, Depends(dep_b)],
+    ):
+        return ItemOut(name=item.name, value=item.value, dep=dep)
+
+    @app.post("/sync/large-receive")
+    def sync_large_receive(payload: LargeIn):
+        return {"received": len(payload.items)}
+
+    @app.post("/async/large-receive")
+    async def async_large_receive(payload: LargeIn):
+        return {"received": len(payload.items)}
+
+    @app.get("/sync/large-dict-no-response-model")
+    def sync_large_dict_no_response_model():
+        return LARGE_PAYLOAD
+
+    @app.get("/sync/large-dict-with-response-model", response_model=LargeOut)
+    def sync_large_dict_with_response_model():
+        return LARGE_PAYLOAD
+
+    @app.get("/sync/large-model-no-response-model")
+    def sync_large_model_no_response_model():
+        return LargeOut(items=LARGE_ITEMS, metadata=LARGE_METADATA)
+
+    @app.get("/sync/large-model-with-response-model", response_model=LargeOut)
+    def sync_large_model_with_response_model():
+        return LargeOut(items=LARGE_ITEMS, metadata=LARGE_METADATA)
+
+    @app.get("/async/large-dict-no-response-model")
+    async def async_large_dict_no_response_model():
+        return LARGE_PAYLOAD
+
+    @app.get("/async/large-dict-with-response-model", response_model=LargeOut)
+    async def async_large_dict_with_response_model():
+        return LARGE_PAYLOAD
+
+    @app.get("/async/large-model-no-response-model")
+    async def async_large_model_no_response_model():
+        return LargeOut(items=LARGE_ITEMS, metadata=LARGE_METADATA)
+
+    @app.get("/async/large-model-with-response-model", response_model=LargeOut)
+    async def async_large_model_with_response_model():
+        return LargeOut(items=LARGE_ITEMS, metadata=LARGE_METADATA)
+
+    @app.get("/async/dict-no-response-model")
+    async def async_dict_no_response_model():
+        return {"name": "foo", "value": 123}
+
+    @app.get("/async/dict-with-response-model", response_model=ItemOut)
+    async def async_dict_with_response_model(
+        dep: Annotated[int, Depends(dep_b)],
+    ):
+        return {"name": "foo", "value": 123, "dep": dep}
+
+    @app.get("/async/model-no-response-model")
+    async def async_model_no_response_model(
+        dep: Annotated[int, Depends(dep_b)],
+    ):
+        return ItemOut(name="foo", value=123, dep=dep)
+
+    @app.get("/async/model-with-response-model", response_model=ItemOut)
+    async def async_model_with_response_model(
+        dep: Annotated[int, Depends(dep_b)],
+    ):
+        return ItemOut(name="foo", value=123, dep=dep)
+
+    return app
+
+
+@pytest.fixture(scope="module")
+def client(app: FastAPI) -> Iterator[TestClient]:
+    with TestClient(app) as client:
+        yield client
+
+
+def _bench_get(benchmark, client: TestClient, path: str) -> tuple[int, bytes]:
+    warmup = client.get(path)
+    assert warmup.status_code == 200
+
+    def do_request() -> tuple[int, bytes]:
+        response = client.get(path)
+        return response.status_code, response.content
+
+    return benchmark(do_request)
+
+
+def _bench_post_json(
+    benchmark, client: TestClient, path: str, json: dict[str, Any]
+) -> tuple[int, bytes]:
+    warmup = client.post(path, json=json)
+    assert warmup.status_code == 200
+
+    def do_request() -> tuple[int, bytes]:
+        response = client.post(path, json=json)
+        return response.status_code, response.content
+
+    return benchmark(do_request)
+
+
+def test_sync_receiving_validated_pydantic_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_post_json(
+        benchmark,
+        client,
+        "/sync/validated",
+        json={"name": "foo", "value": 123},
+    )
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_sync_return_dict_without_response_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_get(benchmark, client, "/sync/dict-no-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123}'
+
+
+def test_sync_return_dict_with_response_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_get(benchmark, client, "/sync/dict-with-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_sync_return_model_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(benchmark, client, "/sync/model-no-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_sync_return_model_with_response_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_get(benchmark, client, "/sync/model-with-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_async_receiving_validated_pydantic_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_post_json(
+        benchmark, client, "/async/validated", json={"name": "foo", "value": 123}
+    )
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_async_return_dict_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(benchmark, client, "/async/dict-no-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123}'
+
+
+def test_async_return_dict_with_response_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_get(benchmark, client, "/async/dict-with-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_async_return_model_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(benchmark, client, "/async/model-no-response-model")
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_async_return_model_with_response_model(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/async/model-with-response-model"
+    )
+    assert status_code == 200
+    assert body == b'{"name":"foo","value":123,"dep":42}'
+
+
+def test_sync_receiving_large_payload(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_post_json(
+        benchmark,
+        client,
+        "/sync/large-receive",
+        json=LARGE_PAYLOAD,
+    )
+    assert status_code == 200
+    assert body == b'{"received":300}'
+
+
+def test_async_receiving_large_payload(benchmark, client: TestClient) -> None:
+    status_code, body = _bench_post_json(
+        benchmark,
+        client,
+        "/async/large-receive",
+        json=LARGE_PAYLOAD,
+    )
+    assert status_code == 200
+    assert body == b'{"received":300}'
+
+
+def _expected_large_payload_json_bytes() -> bytes:
+    return json.dumps(
+        LARGE_PAYLOAD,
+        ensure_ascii=False,
+        allow_nan=False,
+        separators=(",", ":"),
+    ).encode("utf-8")
+
+
+def test_sync_return_large_dict_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/sync/large-dict-no-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_sync_return_large_dict_with_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/sync/large-dict-with-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_sync_return_large_model_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/sync/large-model-no-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_sync_return_large_model_with_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/sync/large-model-with-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_async_return_large_dict_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/async/large-dict-no-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_async_return_large_dict_with_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/async/large-dict-with-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_async_return_large_model_without_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/async/large-model-no-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
+
+
+def test_async_return_large_model_with_response_model(
+    benchmark, client: TestClient
+) -> None:
+    status_code, body = _bench_get(
+        benchmark, client, "/async/large-model-with-response-model"
+    )
+    assert status_code == 200
+    assert body == _expected_large_payload_json_bytes()
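
A closing note on the pattern used throughout the new module: the `benchmark` fixture comes from pytest-codspeed and is call-compatible with pytest-benchmark's fixture, i.e. `benchmark(fn)` runs `fn` under measurement and returns its return value, which is why `_bench_get` and `_bench_post_json` can assert on the benchmarked response afterwards. A minimal self-contained sketch of the same warm-up-then-benchmark shape (the `/ping` route and test are illustrative, not part of this commit):

    # Minimal sketch, assuming pytest-codspeed is installed; run with:
    #     pytest test_ping_benchmark.py --codspeed
    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    app = FastAPI()


    @app.get("/ping")
    def ping():
        return {"ok": True}


    def test_ping_benchmark(benchmark) -> None:
        client = TestClient(app)
        # Warm up outside the measured region, as the helpers above do.
        assert client.get("/ping").status_code == 200
        # benchmark() calls the function under measurement and returns its result.
        body = benchmark(lambda: client.get("/ping").content)
        assert body == b'{"ok":true}'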