diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fbacfde..fd1797e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -59,6 +59,7 @@ jobs:
       - name: Run tests
         env:
           PYTEST_ADDOPTS: '-v --ddtrace --ignore=ddtestopt'
+          _DDTESTOPT_SUBPROCESS_TEST_MODE: "true"
           DD_API_KEY: ${{ secrets.DD_API_KEY }}
           DD_CIVISIBILITY_AGENTLESS_ENABLED: '1'
         run: hatch test -i py=${{ matrix.python-version }} -i pytest=${{ matrix.pytest-version}}
diff --git a/tests/integration_fixtures.py b/tests/integration_fixtures.py
new file mode 100644
index 0000000..c97feef
--- /dev/null
+++ b/tests/integration_fixtures.py
@@ -0,0 +1,296 @@
+#!/usr/bin/env python3
+"""Test fixtures for integration tests.
+
+This module provides helpers to prepare mock fixtures and run pytester in either
+in-process or subprocess mode.
+"""
+
+from contextlib import contextmanager
+from dataclasses import asdict
+from dataclasses import dataclass
+import json
+import os
+import typing as t
+from unittest.mock import Mock
+from unittest.mock import patch
+
+from _pytest.pytester import Pytester
+
+from ddtestopt.internal.test_data import ModuleRef
+from ddtestopt.internal.test_data import SuiteRef
+from ddtestopt.internal.test_data import TestRef
+from ddtestopt.internal.utils import asbool
+from tests.mocks import APIClientMockBuilder
+from tests.mocks import BackendConnectorMockBuilder
+from tests.mocks import get_mock_git_instance
+
+
+def nodeid_to_test_ref(nodeid: str) -> TestRef:
+    """Convert a pytest nodeid to a TestRef object.
+
+    Example: "test_file.py::test_name" → TestRef(...)
+    """
+    if "::" not in nodeid:
+        raise ValueError(f"Invalid test nodeid (missing '::'): {nodeid}")
+
+    file_path, test_name = nodeid.split("::", 1)
+    module_ref = ModuleRef(".")
+    suite_ref = SuiteRef(module_ref, file_path)
+    return TestRef(suite_ref, test_name)
+
+
+def nodeid_to_suite_ref(nodeid: str) -> SuiteRef:
+    """Convert a pytest nodeid to a SuiteRef object.
+
+    Example: "test_file.py" → SuiteRef(...)
+    """
+    if "::" in nodeid:
+        raise ValueError(f"Cannot convert test nodeid to suite: {nodeid}")
+
+    file_path = nodeid
+    module_ref = ModuleRef(".")
+    return SuiteRef(module_ref, file_path)
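+
+
+# DEV: Illustrative round-trips for the helpers above (values are examples):
+#   nodeid_to_test_ref("test_file.py::test_name")
+#       -> TestRef(SuiteRef(ModuleRef("."), "test_file.py"), "test_name")
+#   nodeid_to_suite_ref("test_file.py")
+#       -> SuiteRef(ModuleRef("."), "test_file.py")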
+
+
+@dataclass
+class MockFixture:
+    """Simple test fixture configuration using pytest nodeids.
+
+    Uses simple strings (pytest nodeids) for much simpler JSON serialization.
+
+    Examples:
+    - "test_file.py::test_name" for individual tests
+    - "test_file.py" for entire test files/suites
+    """
+
+    # API client settings
+    skipping_enabled: bool = False
+    auto_retries_enabled: bool = False
+    efd_enabled: bool = False
+    test_management_enabled: bool = False
+    known_tests_enabled: bool = False
+
+    # Simple string lists - much easier to serialize/deserialize
+    skippable_items: t.Optional[t.List[str]] = None  # pytest nodeids
+    known_tests: t.Optional[t.List[str]] = None  # pytest nodeids
+
+    # Environment variables for the test
+    env_vars: t.Optional[t.Dict[str, str]] = None
+
+    def __post_init__(self) -> None:
+        """Initialize empty containers if None."""
+        if self.skippable_items is None:
+            self.skippable_items = []
+        if self.known_tests is None:
+            self.known_tests = []
+        if self.env_vars is None:
+            self.env_vars = {}
+
+    @property
+    def parsed_skippable_items(self) -> t.Set[t.Union[TestRef, SuiteRef]]:
+        """Parse skippable nodeids to TestRef/SuiteRef objects."""
+        items: t.Set[t.Union[TestRef, SuiteRef]] = set()
+        if not self.skippable_items:
+            return items
+
+        for nodeid in self.skippable_items:
+            if "::" in nodeid:
+                # It's a test reference
+                items.add(nodeid_to_test_ref(nodeid))
+            else:
+                # It's a suite/file reference
+                items.add(nodeid_to_suite_ref(nodeid))
+        return items
+
+    @property
+    def parsed_known_tests(self) -> t.Set[TestRef]:
+        """Parse known test nodeids to TestRef objects."""
+        if not self.known_tests:
+            return set()
+        return {nodeid_to_test_ref(nodeid) for nodeid in self.known_tests}
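+
+
+# DEV: Illustrative fixture, using values from the tests in tests/test_integration.py:
+#   MockFixture(
+#       efd_enabled=True,
+#       known_tests_enabled=True,
+#       known_tests=["test_efd.py::test_known_test"],
+#   )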
+
+
+def create_patchers(fixture: MockFixture) -> t.List[t.Any]:
+    """Create all patch objects.
+
+    Args:
+        fixture: MockFixture object with test configuration
+
+    Returns:
+        List of patcher objects
+    """
+    # Create mock git instance using existing helper
+    mock_git_instance = get_mock_git_instance()
+
+    # Create mock writer (simple mock, no builder needed for this)
+    mock_writer = Mock()
+    mock_writer.flush.return_value = None
+    mock_writer._send_events.return_value = None
+
+    # Create mock backend connector using builder
+    mock_connector = BackendConnectorMockBuilder().build()
+
+    # Create API client mock using builder with fixture configuration
+    api_builder = APIClientMockBuilder()
+
+    api_builder.with_skipping_enabled(enabled=fixture.skipping_enabled).with_auto_retries(
+        enabled=fixture.auto_retries_enabled
+    ).with_early_flake_detection(enabled=fixture.efd_enabled).with_test_management(
+        enabled=fixture.test_management_enabled
+    ).with_known_tests(
+        enabled=fixture.known_tests_enabled, tests=fixture.parsed_known_tests
+    ).with_skippable_items(
+        fixture.parsed_skippable_items
+    )
+
+    mock_api_client = api_builder.build()
+
+    patchers = [
+        patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client),
+        patch("ddtestopt.internal.session_manager.get_git_tags", return_value={}),
+        patch("ddtestopt.internal.session_manager.get_platform_tags", return_value={}),
+        patch("ddtestopt.internal.session_manager.Git", return_value=mock_git_instance),
+        patch("ddtestopt.internal.http.BackendConnector", return_value=mock_connector),
+        patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_writer),
+        patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_writer),
+    ]
+    return patchers
+
+
+def start_patchers(fixture: MockFixture) -> t.List[t.Any]:
+    """Create and start all patches (called from the generated conftest.py).
+
+    The patches are started and left running so they stay active for the subprocess.
+
+    Args:
+        fixture: MockFixture object with test configuration
+
+    Returns:
+        List of started patcher objects
+    """
+    patchers = create_patchers(fixture)
+
+    # Start all patches for subprocess mode
+    for patcher in patchers:
+        patcher.start()
+
+    return patchers
+
+
+def setup_mocks_for_in_process(fixture: MockFixture) -> t.ContextManager[None]:
+    """Set up mocks for in-process execution.
+
+    Args:
+        fixture: MockFixture object with test configuration
+
+    Returns:
+        Context manager that manages the patch lifecycle
+    """
+
+    @contextmanager
+    def _mock_context() -> t.Generator[None, None, None]:
+        patchers = start_patchers(fixture)
+
+        try:
+            yield
+        finally:
+            # Stop all patches
+            for patcher in patchers:
+                patcher.stop()
+
+    return _mock_context()
+
+
+# DEV: This is imported inside the subprocess conftest
+def _setup_subprocess_mocks_from_fixture() -> None:
+    """Set up mocks by reading fixture JSON from the environment variable."""
+    fixture_json = os.getenv("_DDTESTOPT_FIXTURE_JSON")
+    if not fixture_json:
+        return
+
+    # Parse JSON directly from the environment variable
+    fixture_data = json.loads(fixture_json)
+
+    fixture = MockFixture(**fixture_data)
+
+    # Set up mocks using the simplified interface
+    start_patchers(fixture)
+
+
+@contextmanager
+def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Generator[None, None, None]:
+    """Set up subprocess mode with fixture JSON in an environment variable."""
+    conftest_content = '''#!/usr/bin/env python3
+"""Auto-generated conftest.py for fixture-based mocking."""
+import sys
+from pathlib import Path
+
+# Add parent directory to path for imports
+test_dir = Path(__file__).parent.parent
+if str(test_dir) not in sys.path:
+    sys.path.insert(0, str(test_dir))
+
+from tests.integration_fixtures import _setup_subprocess_mocks_from_fixture
+
+# Set up mocks as early as possible
+_setup_subprocess_mocks_from_fixture()
+'''
+
+    # Set fixture JSON directly in the environment variable
+    pytester._monkeypatch.setenv("_DDTESTOPT_FIXTURE_JSON", json.dumps(asdict(fixture)))
+
+    # Set standard test environment variables
+    pytester._monkeypatch.setenv("DD_API_KEY", "test-api-key")
+    pytester._monkeypatch.setenv("DD_SERVICE", "test-service")
+    pytester._monkeypatch.setenv("DD_ENV", "test-env")
+
+    if fixture.env_vars:
+        # Set additional environment variables from the fixture
+        for key, value in fixture.env_vars.items():
+            pytester._monkeypatch.setenv(key, value)
+
+    # Create static conftest.py
+    pytester.makeconftest(conftest_content)
+
+    yield
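+
+
+# DEV: The parent-to-subprocess handoff above is plain JSON; an illustrative payload
+# (field names come from MockFixture, values are examples):
+#   _DDTESTOPT_FIXTURE_JSON='{"skipping_enabled": false, "auto_retries_enabled": true,
+#       "efd_enabled": false, "test_management_enabled": false, "known_tests_enabled": false,
+#       "skippable_items": [], "known_tests": [], "env_vars": {}}'
+# The parent writes json.dumps(asdict(fixture)); the generated conftest reads it back
+# with MockFixture(**json.loads(...)).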
+ """ + if subprocess_mode is None: + subprocess_mode = get_subprocess_test_mode() + + if subprocess_mode: + # Subprocess mode: create fixture file and static conftest.py + with _setup_subprocess_mode(pytester, fixture): + yield + else: + # In-process mode: use context manager + with setup_mocks_for_in_process(fixture): + yield + + +def run_pytest_with_fixture( + pytester: Pytester, + pytest_args: t.List[str], + fixture: MockFixture, + subprocess_mode: t.Optional[bool] = None, +) -> t.Any: + """Run a test with the given fixture configuration.""" + with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): + if subprocess_mode: + return pytester.runpytest_subprocess(*pytest_args) + else: + return pytester.runpytest(*pytest_args) diff --git a/tests/mocks.py b/tests/mocks.py index 5cc8cdc..c2a0f95 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -419,9 +419,8 @@ def with_request_response(self, method: str, path: str, response_data: t.Any) -> def build(self) -> Mock: """Build the BackendConnector mock.""" - from ddtestopt.internal.http import BackendConnector - - mock_connector = Mock(spec=BackendConnector) + # Create a simple Mock without spec to avoid CI environment issues + mock_connector = Mock() # Mock methods to prevent real HTTP calls def mock_post_json(endpoint: str, data: t.Any) -> t.Tuple[Mock, t.Any]: @@ -438,9 +437,9 @@ def mock_request(method: str, path: str, **kwargs: t.Any) -> t.Tuple[Mock, t.Any def mock_post_files(path: str, files: t.Any, **kwargs: t.Any) -> t.Tuple[Mock, t.Dict[str, t.Any]]: return Mock(), {} - mock_connector.post_json.side_effect = mock_post_json - mock_connector.request.side_effect = mock_request - mock_connector.post_files.side_effect = mock_post_files + mock_connector.post_json = Mock(side_effect=mock_post_json) + mock_connector.request = Mock(side_effect=mock_request) + mock_connector.post_files = Mock(side_effect=mock_post_files) return mock_connector @@ -534,10 +533,9 @@ def setup_standard_mocks() -> t.ContextManager[t.Any]: def network_mocks() -> t.ContextManager[t.Any]: """Create comprehensive mocks that prevent ALL network calls at multiple levels.""" - from contextlib import ExitStack def _create_stack() -> t.ContextManager[t.Any]: - stack = ExitStack() + stack = contextlib.ExitStack() # Mock the session manager dependencies stack.enter_context( diff --git a/tests/test_integration.py b/tests/test_integration.py index 22ba267..2ae67aa 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -4,17 +4,14 @@ from unittest.mock import Mock from unittest.mock import patch -from _pytest.monkeypatch import MonkeyPatch from _pytest.pytester import Pytester import pytest from ddtestopt.internal.session_manager import SessionManager -from ddtestopt.internal.test_data import ModuleRef -from ddtestopt.internal.test_data import SuiteRef -from ddtestopt.internal.test_data import TestRef from ddtestopt.internal.test_data import TestSession +from tests.integration_fixtures import MockFixture +from tests.integration_fixtures import run_pytest_with_fixture from tests.mocks import mock_api_client_settings -from tests.mocks import network_mocks from tests.mocks import setup_standard_mocks @@ -36,18 +33,18 @@ def test_simple(): """ ) - # Use network mocks to prevent all real HTTP calls - with network_mocks(), patch("ddtestopt.internal.session_manager.APIClient") as mock_api_client: - mock_api_client.return_value = mock_api_client_settings() + # Create simple fixture with default settings + fixture = MockFixture() - result = 
pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Test should pass assert result.ret == 0 result.assert_outcomes(passed=1) @pytest.mark.slow - def test_retry_functionality_with_pytester(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None: + def test_retry_functionality_with_pytester(self, pytester: Pytester) -> None: """Test that failing tests are retried when auto retry is enabled.""" # Create a test file with a failing test pytester.makepyfile( @@ -62,11 +59,14 @@ def test_passes(): """ ) - # Use network mocks to prevent all real HTTP calls - with network_mocks(), patch("ddtestopt.internal.session_manager.APIClient") as mock_api_client: - mock_api_client.return_value = mock_api_client_settings(auto_retries_enabled=True) - monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Set retry-related environment variables + pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") + + # Create fixture with auto retries enabled + fixture = MockFixture(auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"}) + + # Run test with auto retries configuration + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the test failed after retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -84,7 +84,7 @@ def test_passes(): # Verify that retries happened - should see "RETRY FAILED (Auto Test Retries)" messages # DEV: We configured DD_CIVISIBILITY_FLAKY_RETRY_COUNT=2 # BUT the plugin will show 3 retry attempts (as it includes the initial attempt) - retry_messages = output.count("RETRY FAILED (Auto Test Retries)") + retry_messages = output.count("test_always_fails RETRY FAILED (Auto Test Retries)") assert retry_messages == 3, f"Expected 3 retry messages, got {retry_messages}" # Should see the final summary mentioning dd_retry @@ -110,19 +110,14 @@ def test_known_test(): """ ) - # Set up known tests - only include the "known" test - known_suite = SuiteRef(ModuleRef("."), "test_efd.py") - known_test_ref = TestRef(known_suite, "test_known_test") + # Define the known test for this test scenario using simple nodeid + known_test_nodeid = "test_efd.py::test_known_test" - # Use unified mock setup with EFD enabled - with patch( - "ddtestopt.internal.session_manager.APIClient", - return_value=mock_api_client_settings( - efd_enabled=True, known_tests_enabled=True, known_tests={known_test_ref} - ), - ), setup_standard_mocks(): + # Create fixture with EFD enabled and known tests + fixture = MockFixture(efd_enabled=True, known_tests_enabled=True, known_tests=[known_test_nodeid]) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run test with EFD configuration + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the test failed after EFD retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -165,17 +160,14 @@ def test_should_run(): """ ) - # Set up skippable tests - mark one test as skippable - skippable_suite = SuiteRef(ModuleRef("."), "test_itr.py") - skippable_test_ref = TestRef(skippable_suite, "test_should_be_skipped") + # Define the skippable test for this test scenario using simple nodeid + skippable_test_nodeid = 
"test_itr.py::test_should_be_skipped" - # Use unified mock setup with ITR enabled - with patch( - "ddtestopt.internal.session_manager.APIClient", - return_value=mock_api_client_settings(skipping_enabled=True, skippable_items={skippable_test_ref}), - ), setup_standard_mocks(): + # Create fixture with skipping enabled + fixture = MockFixture(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run test with ITR configuration + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that tests completed successfully assert result.ret == 0 # Exit code 0 indicates success @@ -216,12 +208,11 @@ def test_with_assertion(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Create simple fixture with default settings + fixture = MockFixture() - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that tests ran successfully assert result.ret == 0 @@ -243,69 +234,18 @@ def test_passing(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Create simple fixture with default settings + fixture = MockFixture() - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that one test failed and one passed assert result.ret == 1 # pytest exits with 1 when tests fail result.assert_outcomes(passed=1, failed=1) @pytest.mark.slow - def test_plugin_loads_correctly(self, pytester: Pytester) -> None: - """Test that the ddtestopt plugin loads without errors.""" - # Create test file using pytester - pytester.makepyfile( - """ - def test_plugin_loaded(): - '''Test to verify plugin is loaded.''' - assert True - """ - ) - - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): - - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v") - - # Should run without plugin loading errors - assert result.ret == 0 - result.assert_outcomes(passed=1) - - # Should not have any error messages about plugin loading - output = result.stdout.str() - assert "Error setting up Test Optimization plugin" not in output - - @pytest.mark.slow - def test_test_session_name_extraction(self, pytester: Pytester) -> None: - """Test that the pytest session command is properly extracted.""" - # Create test file using pytester - pytester.makepyfile( - """ - def test_command_extraction(): - '''Test for command extraction functionality.''' - assert True - """ - ) - - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): - - # Run with specific arguments that should be captured - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v") - - assert result.ret == 0 - result.assert_outcomes(passed=1) - - @pytest.mark.slow - def 
-    def test_retry_environment_variables_respected(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
+    def test_retry_environment_variables_respected(self, pytester: Pytester) -> None:
         """Test that retry environment variables are properly read by the plugin."""
         # Create test file using pytester
         pytester.makepyfile(
@@ -323,17 +263,22 @@ def test_simple_pass():
 """
         )
 
-        # Set up mocks and environment (including retry env vars)
-        with patch(
-            "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings()
-        ), setup_standard_mocks():
-            # Set all environment variables via monkeypatch
+        # Set the retry env vars directly as well: fixture.env_vars only applies in subprocess mode
+        pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_ENABLED", "true")
+        pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2")
+        pytester._monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5")
 
-            monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_ENABLED", "true")
-            monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2")
-            monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5")
+        # Create fixture with environment variables
+        fixture = MockFixture(
+            env_vars={
+                "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED": "true",
+                "DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2",
+                "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": "5",
+            }
+        )
 
-            result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v")
+        # Run test with automatic mode detection
+        result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture)
 
         # Tests should pass
         assert result.ret == 0