diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
index a78a36669ac7..15e032d5c4d6 100644
--- a/.github/workflows/codspeed.yml
+++ b/.github/workflows/codspeed.yml
@@ -24,6 +24,9 @@ jobs:
         uses: CodSpeedHQ/action@v3
         with:
           token: ${{ secrets.CODSPEED_TOKEN }}
-          run: make unit_tests args="--codspeed -n auto"
+          run: |
+            uv run pytest src/backend/tests \
+              --ignore=src/backend/tests/integration \
+              --codspeed
       - name: Minimize uv cache
         run: uv cache prune --ci
diff --git a/src/backend/tests/performance/__init__.py b/src/backend/tests/performance/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/src/backend/tests/performance/test_server_init.py b/src/backend/tests/performance/test_server_init.py
new file mode 100644
index 000000000000..03a8571ee5c7
--- /dev/null
+++ b/src/backend/tests/performance/test_server_init.py
@@ -0,0 +1,61 @@
+import asyncio
+
+import pytest
+from langflow.services.deps import get_settings_service
+
+
+@pytest.mark.benchmark
+async def test_initialize_services():
+    """Benchmark the initialization of services."""
+    from langflow.services.utils import initialize_services
+
+    await asyncio.to_thread(initialize_services, fix_migration=False)
+
+
+@pytest.mark.benchmark
+async def test_setup_llm_caching():
+    """Benchmark LLM caching setup."""
+    from langflow.interface.utils import setup_llm_caching
+
+    await asyncio.to_thread(setup_llm_caching)
+
+
+@pytest.mark.benchmark
+async def test_initialize_super_user():
+    """Benchmark super user initialization."""
+    from langflow.initial_setup.setup import initialize_super_user_if_needed
+    from langflow.services.utils import initialize_services
+
+    await asyncio.to_thread(initialize_services, fix_migration=False)
+    await asyncio.to_thread(initialize_super_user_if_needed)
+
+
+@pytest.mark.benchmark
+async def test_get_and_cache_all_types_dict():
+    """Benchmark get_and_cache_all_types_dict function."""
+    from langflow.interface.types import get_and_cache_all_types_dict
+
+    settings_service = await asyncio.to_thread(get_settings_service)
+    result = await asyncio.to_thread(get_and_cache_all_types_dict, settings_service)
+    assert result is not None
+
+
+@pytest.mark.benchmark
+async def test_create_starter_projects():
+    """Benchmark creation of starter projects."""
+    from langflow.initial_setup.setup import create_or_update_starter_projects
+    from langflow.interface.types import get_and_cache_all_types_dict
+    from langflow.services.utils import initialize_services
+
+    await asyncio.to_thread(initialize_services, fix_migration=False)
+    settings_service = await asyncio.to_thread(get_settings_service)
+    types_dict = await get_and_cache_all_types_dict(settings_service)
+    await asyncio.to_thread(create_or_update_starter_projects, types_dict)
+
+
+@pytest.mark.benchmark
+async def test_load_flows():
+    """Benchmark loading flows from directory."""
+    from langflow.initial_setup.setup import load_flows_from_directory
+
+    await asyncio.to_thread(load_flows_from_directory)
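Note on the pattern above: each benchmark wraps a synchronous startup function in `asyncio.to_thread` so it can be awaited from an async test, while `@pytest.mark.benchmark` lets pytest-codspeed measure the whole test. A minimal sketch of that pattern, assuming pytest-codspeed and pytest-asyncio (`asyncio_mode = "auto"`) are installed; `slow_startup` and `test_startup_benchmark` are illustrative names, not Langflow code:

```python
import asyncio
import time

import pytest


def slow_startup() -> None:
    """Stand-in for a blocking init call such as initialize_services."""
    time.sleep(0.01)


@pytest.mark.benchmark  # pytest-codspeed measures the whole test body
async def test_startup_benchmark():
    # asyncio.to_thread runs the sync call in a worker thread, so the
    # event loop stays responsive while the blocking work is timed.
    await asyncio.to_thread(slow_startup)
```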
diff --git a/src/backend/tests/unit/test_chat_endpoint.py b/src/backend/tests/unit/test_chat_endpoint.py
index 9d267f266818..9fe66e3798cf 100644
--- a/src/backend/tests/unit/test_chat_endpoint.py
+++ b/src/backend/tests/unit/test_chat_endpoint.py
@@ -1,11 +1,13 @@
 import json
 from uuid import UUID
 
+import pytest
 from langflow.memory import get_messages
 from langflow.services.database.models.flow import FlowCreate, FlowUpdate
 from orjson import orjson
 
 
+@pytest.mark.benchmark
 async def test_build_flow(client, json_memory_chatbot_no_llm, logged_in_headers):
     flow_id = await _create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
 
@@ -15,6 +17,7 @@ async def test_build_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
     check_messages(flow_id)
 
 
+@pytest.mark.benchmark
 async def test_build_flow_from_request_data(client, json_memory_chatbot_no_llm, logged_in_headers):
     flow_id = await _create_flow(client, json_memory_chatbot_no_llm, logged_in_headers)
     response = await client.get("api/v1/flows/" + str(flow_id), headers=logged_in_headers)
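Reusing the existing endpoint tests as benchmarks keeps one source of truth: the same assertions guard correctness while CodSpeed tracks their runtime. If the suite is ever run without pytest-codspeed installed (the plugin normally registers the marker itself), the marker can be registered manually to avoid PytestUnknownMarkWarning; a hypothetical conftest.py sketch:

```python
def pytest_configure(config):
    # Hypothetical registration; only needed when pytest-codspeed is absent,
    # since the plugin registers the `benchmark` marker on its own.
    config.addinivalue_line(
        "markers",
        "benchmark: measured by pytest-codspeed when --codspeed is passed",
    )
```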
diff --git a/src/backend/tests/unit/test_endpoints.py b/src/backend/tests/unit/test_endpoints.py
index 352766f10645..22efe00bb04e 100644
--- a/src/backend/tests/unit/test_endpoints.py
+++ b/src/backend/tests/unit/test_endpoints.py
@@ -108,6 +108,7 @@ async def poll_task_status(client, headers, href, max_attempts=20, sleep_time=1)
 }
 
 
+@pytest.mark.benchmark
 async def test_get_all(client: AsyncClient, logged_in_headers):
     response = await client.get("api/v1/all", headers=logged_in_headers)
     assert response.status_code == 200
@@ -329,6 +330,7 @@ async def test_successful_run_with_output_type_text(client, simple_api_test, cre
     assert all(key in result for result in inner_results for key in expected_keys), outputs_dict
 
 
+@pytest.mark.benchmark
 async def test_successful_run_with_output_type_any(client, simple_api_test, created_api_key):
     # This one should have both the ChatOutput and TextOutput components
     headers = {"x-api-key": created_api_key.api_key}
@@ -360,6 +362,7 @@ async def test_successful_run_with_output_type_any(client, simple_api_test, crea
     assert all(key in result for result in inner_results for key in expected_keys), outputs_dict
 
 
+@pytest.mark.benchmark
 async def test_successful_run_with_output_type_debug(client, simple_api_test, created_api_key):
     # This one should return outputs for all components
     # Let's just check the amount of outputs(there should be 7)
@@ -385,6 +388,7 @@ async def test_successful_run_with_output_type_debug(client, simple_api_test, cr
     assert len(outputs_dict.get("outputs")) == 3
 
 
+@pytest.mark.benchmark
 async def test_successful_run_with_input_type_text(client, simple_api_test, created_api_key):
     headers = {"x-api-key": created_api_key.api_key}
     flow_id = simple_api_test["id"]
@@ -419,6 +423,7 @@
 
 
 @pytest.mark.api_key_required
+@pytest.mark.benchmark
 async def test_successful_run_with_input_type_chat(client: AsyncClient, simple_api_test, created_api_key):
     headers = {"x-api-key": created_api_key.api_key}
     flow_id = simple_api_test["id"]
@@ -451,6 +456,7 @@ async def test_successful_run_with_input_type_chat(client: AsyncClient, simple_a
     ), chat_input_outputs
 
 
+@pytest.mark.benchmark
 async def test_invalid_run_with_input_type_chat(client, simple_api_test, created_api_key):
     headers = {"x-api-key": created_api_key.api_key}
     flow_id = simple_api_test["id"]
@@ -465,6 +471,7 @@ async def test_invalid_run_with_input_type_chat(client, simple_api_test, created
     assert "If you pass an input_value to the chat input, you cannot pass a tweak with the same name." in response.text
 
 
+@pytest.mark.benchmark
 async def test_successful_run_with_input_type_any(client, simple_api_test, created_api_key):
     headers = {"x-api-key": created_api_key.api_key}
     flow_id = simple_api_test["id"]
@@ -517,6 +524,7 @@ async def test_invalid_flow_id(client, created_api_key):
     # Check if the error detail is as expected
 
 
+@pytest.mark.benchmark
 async def test_starter_projects(client, created_api_key):
     headers = {"x-api-key": created_api_key.api_key}
     response = await client.get("api/v1/starter-projects/", headers=headers)
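These benchmarks can be run locally with the same command the workflow uses: `uv run pytest src/backend/tests --ignore=src/backend/tests/integration --codspeed`. When `--codspeed` is passed, pytest-codspeed is expected to select only tests carrying the benchmark marker or fixture, so the rest of the unit suite is deselected rather than executed. For timing a narrower region than a whole test, the plugin also offers a pytest-benchmark-style `benchmark` fixture; a sketch, with `build_payload` as an illustrative helper rather than part of this PR:

```python
def build_payload() -> dict:
    """Illustrative helper standing in for any code worth isolating."""
    return {"input_value": "hello", "output_type": "any"}


def test_build_payload_benchmark(benchmark):
    # Only the callable handed to `benchmark` is measured; the assertion
    # below stays outside the timed region.
    result = benchmark(build_payload)
    assert result["output_type"] == "any"
```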