From e6de8e2d9ade77332f86c151f4f410d0b55abc0d Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 15:24:30 +0200 Subject: [PATCH 01/20] test with subprocess integration tests --- pyproject.toml | 3 + tests/subprocess_mocks.py | 347 ++++++++++++++++++++++++++++++++++++++ tests/test_integration.py | 102 ++++++----- 3 files changed, 398 insertions(+), 54 deletions(-) create mode 100644 tests/subprocess_mocks.py diff --git a/pyproject.toml b/pyproject.toml index 23cb2ca..741711b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,9 @@ parallel = true retries = 2 retry-delay = 1 +[tool.hatch.envs.hatch-test.env-vars] +_DD_CIVISIBILITY_ITR_FORCE_ENABLE_COVERAGE = "true" + [[tool.hatch.envs.hatch-test.matrix]] python = ["3.13", "3.12"] pytest = ["8.1.2", "8.4.*"] diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py new file mode 100644 index 0000000..7adc9e5 --- /dev/null +++ b/tests/subprocess_mocks.py @@ -0,0 +1,347 @@ +#!/usr/bin/env python3 +"""Subprocess mocking utilities for test_integration.py refactoring. + +This module provides utilities to set up mocks in pytest subprocesses when using +pytester.runpytest_subprocess(). It works by: + +1. Serializing mock configuration to environment variables +2. Creating a conftest.py file that reads these variables and sets up mocks +3. Providing helper functions to generate the necessary conftest content +""" + +import json +import typing as t + +from _pytest.pytester import Pytester + +from ddtestopt.internal.test_data import ModuleRef +from ddtestopt.internal.test_data import SuiteRef +from ddtestopt.internal.test_data import TestRef + + +class SubprocessMockConfig: + """Configuration for mocks in subprocess environment.""" + + def __init__(self) -> None: + self.api_client_config = { + "skipping_enabled": False, + "auto_retries_enabled": False, + "efd_enabled": False, + "test_management_enabled": False, + "known_tests_enabled": False, + } + self.skippable_items: t.Set[t.Union[TestRef, SuiteRef]] = set() + self.known_tests: t.Set[TestRef] = set() + self.environment_vars: t.Dict[str, str] = {} + + def with_skipping_enabled(self, enabled: bool = True) -> "SubprocessMockConfig": + """Enable/disable test skipping.""" + self.api_client_config["skipping_enabled"] = enabled + return self + + def with_auto_retries_enabled(self, enabled: bool = True) -> "SubprocessMockConfig": + """Enable/disable auto retries.""" + self.api_client_config["auto_retries_enabled"] = enabled + return self + + def with_early_flake_detection(self, enabled: bool = True) -> "SubprocessMockConfig": + """Enable/disable early flake detection.""" + self.api_client_config["efd_enabled"] = enabled + return self + + def with_test_management(self, enabled: bool = True) -> "SubprocessMockConfig": + """Enable/disable test management.""" + self.api_client_config["test_management_enabled"] = enabled + return self + + def with_known_tests( + self, enabled: bool = True, tests: t.Optional[t.Set[TestRef]] = None + ) -> "SubprocessMockConfig": + """Configure known tests.""" + self.api_client_config["known_tests_enabled"] = enabled + if tests is not None: + self.known_tests = tests + return self + + def with_skippable_items(self, items: t.Set[t.Union[TestRef, SuiteRef]]) -> "SubprocessMockConfig": + """Set skippable test items.""" + self.skippable_items = items + return self + + def with_environment_vars(self, env_vars: t.Dict[str, str]) -> "SubprocessMockConfig": + """Set additional environment variables.""" + self.environment_vars.update(env_vars) + return 
self + + +def _serialize_test_ref(test_ref: TestRef) -> t.Dict[str, str]: + """Serialize a TestRef to a dictionary.""" + return { + "type": "TestRef", + "module_name": test_ref.suite.module.name, + "suite_name": test_ref.suite.name, + "test_name": test_ref.name, + } + + +def _serialize_suite_ref(suite_ref: SuiteRef) -> t.Dict[str, str]: + """Serialize a SuiteRef to a dictionary.""" + return { + "type": "SuiteRef", + "module_name": suite_ref.module.name, + "suite_name": suite_ref.name, + } + + +def _deserialize_test_ref(data: t.Dict[str, str]) -> TestRef: + """Deserialize a TestRef from a dictionary.""" + module_ref = ModuleRef(data["module_name"]) + suite_ref = SuiteRef(module_ref, data["suite_name"]) + return TestRef(suite_ref, data["test_name"]) + + +def _deserialize_suite_ref(data: t.Dict[str, str]) -> SuiteRef: + """Deserialize a SuiteRef from a dictionary.""" + module_ref = ModuleRef(data["module_name"]) + return SuiteRef(module_ref, data["suite_name"]) + + +def serialize_mock_config(config: SubprocessMockConfig) -> t.Dict[str, str]: + """Serialize mock configuration to environment variables.""" + env_vars = {} + + # Serialize API client config + env_vars["DDTESTOPT_MOCK_API_CONFIG"] = json.dumps(config.api_client_config) + + # Serialize skippable items + skippable_data = [] + for item in config.skippable_items: + if isinstance(item, TestRef): + skippable_data.append(_serialize_test_ref(item)) + elif isinstance(item, SuiteRef): + skippable_data.append(_serialize_suite_ref(item)) + env_vars["DDTESTOPT_MOCK_SKIPPABLE_ITEMS"] = json.dumps(skippable_data) + + # Serialize known tests + known_tests_data = [_serialize_test_ref(test_ref) for test_ref in config.known_tests] + env_vars["DDTESTOPT_MOCK_KNOWN_TESTS"] = json.dumps(known_tests_data) + + # Add additional environment variables + env_vars.update(config.environment_vars) + + # Add standard test environment variables + env_vars.update( + { + "DD_API_KEY": "test-api-key", + "DD_SERVICE": "test-service", + "DD_ENV": "test-env", + "DDTESTOPT_SUBPROCESS_MOCKING": "true", + } + ) + + return env_vars + + +def generate_conftest_content() -> str: + """Generate conftest.py content for subprocess mocking.""" + return '''#!/usr/bin/env python3 +"""Auto-generated conftest.py for subprocess mocking.""" + +import json +import os +from unittest.mock import Mock, patch +import pytest + +# Import the mock utilities we need +import sys +from pathlib import Path + +# Add the parent directory to the path so we can import our test utilities +test_dir = Path(__file__).parent.parent +if str(test_dir) not in sys.path: + sys.path.insert(0, str(test_dir)) + +from ddtestopt.internal.test_data import ModuleRef, SuiteRef, TestRef +from ddtestopt.internal.api_client import AutoTestRetriesSettings +from ddtestopt.internal.api_client import EarlyFlakeDetectionSettings +from ddtestopt.internal.api_client import Settings +from ddtestopt.internal.api_client import TestManagementSettings + + +def _deserialize_test_ref(data): + """Deserialize a TestRef from a dictionary.""" + module_ref = ModuleRef(data['module_name']) + suite_ref = SuiteRef(module_ref, data['suite_name']) + return TestRef(suite_ref, data['test_name']) + + +def _deserialize_suite_ref(data): + """Deserialize a SuiteRef from a dictionary.""" + module_ref = ModuleRef(data['module_name']) + return SuiteRef(module_ref, data['suite_name']) + + +def _setup_subprocess_mocks(): + """Set up mocks based on environment variables.""" + if not os.getenv('DDTESTOPT_SUBPROCESS_MOCKING'): + return + + # Parse API client 
configuration + api_config_str = os.getenv('DDTESTOPT_MOCK_API_CONFIG', '{}') + api_config = json.loads(api_config_str) + + # Parse skippable items + skippable_items_str = os.getenv('DDTESTOPT_MOCK_SKIPPABLE_ITEMS', '[]') + skippable_items_data = json.loads(skippable_items_str) + skippable_items = set() + for item_data in skippable_items_data: + if item_data['type'] == 'TestRef': + skippable_items.add(_deserialize_test_ref(item_data)) + elif item_data['type'] == 'SuiteRef': + skippable_items.add(_deserialize_suite_ref(item_data)) + + # Parse known tests + known_tests_str = os.getenv('DDTESTOPT_MOCK_KNOWN_TESTS', '[]') + known_tests_data = json.loads(known_tests_str) + known_tests = {_deserialize_test_ref(test_data) for test_data in known_tests_data} + + # Create mock API client + mock_api_client = Mock() + mock_api_client.get_settings.return_value = Settings( + early_flake_detection=EarlyFlakeDetectionSettings( + enabled=api_config.get('efd_enabled', False), + slow_test_retries_5s=3, + slow_test_retries_10s=2, + slow_test_retries_30s=1, + slow_test_retries_5m=1, + faulty_session_threshold=30, + ), + test_management=TestManagementSettings(enabled=api_config.get('test_management_enabled', False)), + auto_test_retries=AutoTestRetriesSettings(enabled=api_config.get('auto_retries_enabled', False)), + known_tests_enabled=api_config.get('known_tests_enabled', False), + coverage_enabled=False, + skipping_enabled=api_config.get('skipping_enabled', False), + require_git=False, + itr_enabled=api_config.get('skipping_enabled', False), + ) + + mock_api_client.get_known_tests.return_value = known_tests + mock_api_client.get_test_management_properties.return_value = {} + mock_api_client.get_known_commits.return_value = [] + mock_api_client.send_git_pack_file.return_value = None + mock_api_client.get_skippable_tests.return_value = ( + skippable_items, + "correlation-123" if skippable_items else None, + ) + + # Create mock git instance + mock_git_instance = Mock() + mock_git_instance.get_latest_commits.return_value = [] + mock_git_instance.get_filtered_revisions.return_value = [] + mock_git_instance.pack_objects.return_value = iter([]) + + # Create mock writer + mock_writer = Mock() + mock_writer.flush.return_value = None + mock_writer._send_events.return_value = None + + # Create mock backend connector + mock_connector = Mock() + mock_connector.post_json.return_value = (Mock(), {}) + mock_connector.request.return_value = (Mock(), {}) + mock_connector.post_files.return_value = (Mock(), {}) + + # Apply all the patches + patcher1 = patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client) + patcher2 = patch("ddtestopt.internal.session_manager.get_git_tags", return_value={}) + patcher3 = patch("ddtestopt.internal.session_manager.get_platform_tags", return_value={}) + patcher4 = patch("ddtestopt.internal.session_manager.Git", return_value=mock_git_instance) + patcher5 = patch("ddtestopt.internal.http.BackendConnector", return_value=mock_connector) + patcher6 = patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_writer) + patcher7 = patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_writer) + + # Start all patches + patcher1.start() + patcher2.start() + patcher3.start() + patcher4.start() + patcher5.start() + patcher6.start() + patcher7.start() + + +# Set up mocks as early as possible +_setup_subprocess_mocks() + + +@pytest.fixture(autouse=True) +def ensure_mocks_are_active(): + """Ensure mocks are active for all tests.""" + # This fixture runs for 
every test, ensuring mocks stay active + pass +''' + + +def create_subprocess_mock_config(**kwargs: t.Any) -> SubprocessMockConfig: + """Create a SubprocessMockConfig with sensible defaults.""" + config = SubprocessMockConfig() + + # Apply any provided configuration + if "skipping_enabled" in kwargs: + config.with_skipping_enabled(kwargs["skipping_enabled"]) + if "auto_retries_enabled" in kwargs: + config.with_auto_retries_enabled(kwargs["auto_retries_enabled"]) + if "efd_enabled" in kwargs: + config.with_early_flake_detection(kwargs["efd_enabled"]) + if "test_management_enabled" in kwargs: + config.with_test_management(kwargs["test_management_enabled"]) + if "known_tests_enabled" in kwargs: + config.with_known_tests(kwargs["known_tests_enabled"], kwargs.get("known_tests")) + if "skippable_items" in kwargs: + config.with_skippable_items(kwargs["skippable_items"]) + if "environment_vars" in kwargs: + config.with_environment_vars(kwargs["environment_vars"]) + + return config + + +def setup_subprocess_environment(pytester: Pytester, config: SubprocessMockConfig) -> None: + """Set up the subprocess environment with mocks.""" + # Serialize configuration to environment variables + env_vars = serialize_mock_config(config) + + # Set environment variables using pytester's mechanism + for key, value in env_vars.items(): + pytester._monkeypatch.setenv(key, value) + + # Create conftest.py in the test directory + pytester.makeconftest(generate_conftest_content()) + + +# Convenience functions for common test scenarios + + +def setup_basic_subprocess_mocks(pytester: Pytester) -> None: + """Set up basic mocks for simple test execution.""" + config = create_subprocess_mock_config() + setup_subprocess_environment(pytester, config) + + +def setup_retry_subprocess_mocks(pytester: Pytester) -> None: + """Set up mocks for auto retry functionality testing.""" + config = create_subprocess_mock_config(auto_retries_enabled=True) + setup_subprocess_environment(pytester, config) + + +def setup_efd_subprocess_mocks(pytester: Pytester, known_tests: t.Optional[t.Set[TestRef]] = None) -> None: + """Set up mocks for Early Flake Detection testing.""" + config = create_subprocess_mock_config(efd_enabled=True, known_tests_enabled=True, known_tests=known_tests or set()) + setup_subprocess_environment(pytester, config) + + +def setup_itr_subprocess_mocks( + pytester: Pytester, skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None +) -> None: + """Set up mocks for Intelligent Test Runner testing.""" + config = create_subprocess_mock_config(skipping_enabled=True, skippable_items=skippable_items or set()) + setup_subprocess_environment(pytester, config) diff --git a/tests/test_integration.py b/tests/test_integration.py index 22ba267..9d93861 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -14,8 +14,11 @@ from ddtestopt.internal.test_data import TestRef from ddtestopt.internal.test_data import TestSession from tests.mocks import mock_api_client_settings -from tests.mocks import network_mocks from tests.mocks import setup_standard_mocks +from tests.subprocess_mocks import setup_basic_subprocess_mocks +from tests.subprocess_mocks import setup_efd_subprocess_mocks +from tests.subprocess_mocks import setup_itr_subprocess_mocks +from tests.subprocess_mocks import setup_retry_subprocess_mocks # Functions moved to tests.mocks for centralization @@ -36,11 +39,11 @@ def test_simple(): """ ) - # Use network mocks to prevent all real HTTP calls - with network_mocks(), 
patch("ddtestopt.internal.session_manager.APIClient") as mock_api_client: - mock_api_client.return_value = mock_api_client_settings() + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") # Test should pass assert result.ret == 0 @@ -62,11 +65,14 @@ def test_passes(): """ ) - # Use network mocks to prevent all real HTTP calls - with network_mocks(), patch("ddtestopt.internal.session_manager.APIClient") as mock_api_client: - mock_api_client.return_value = mock_api_client_settings(auto_retries_enabled=True) - monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Set up mocks for subprocess execution with retry functionality + setup_retry_subprocess_mocks(pytester) + + # Set retry-related environment variables + pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") + + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") # Check that the test failed after retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -114,15 +120,11 @@ def test_known_test(): known_suite = SuiteRef(ModuleRef("."), "test_efd.py") known_test_ref = TestRef(known_suite, "test_known_test") - # Use unified mock setup with EFD enabled - with patch( - "ddtestopt.internal.session_manager.APIClient", - return_value=mock_api_client_settings( - efd_enabled=True, known_tests_enabled=True, known_tests={known_test_ref} - ), - ), setup_standard_mocks(): + # Set up mocks for subprocess execution with EFD enabled + setup_efd_subprocess_mocks(pytester, known_tests={known_test_ref}) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") # Check that the test failed after EFD retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -169,13 +171,11 @@ def test_should_run(): skippable_suite = SuiteRef(ModuleRef("."), "test_itr.py") skippable_test_ref = TestRef(skippable_suite, "test_should_be_skipped") - # Use unified mock setup with ITR enabled - with patch( - "ddtestopt.internal.session_manager.APIClient", - return_value=mock_api_client_settings(skipping_enabled=True, skippable_items={skippable_test_ref}), - ), setup_standard_mocks(): + # Set up mocks for subprocess execution with ITR enabled + setup_itr_subprocess_mocks(pytester, skippable_items={skippable_test_ref}) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") # Check that tests completed successfully assert result.ret == 0 # Exit code 0 indicates success @@ -216,12 +216,11 @@ def test_with_assertion(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") # Check that tests ran successfully 
assert result.ret == 0 @@ -243,12 +242,11 @@ def test_passing(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") # Check that one test failed and one passed assert result.ret == 1 # pytest exits with 1 when tests fail @@ -266,12 +264,11 @@ def test_plugin_loaded(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v") # Should run without plugin loading errors assert result.ret == 0 @@ -293,13 +290,11 @@ def test_command_extraction(): """ ) - # Set up mocks and environment - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - # Run with specific arguments that should be captured - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v") + # Run with specific arguments that should be captured in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v") assert result.ret == 0 result.assert_outcomes(passed=1) @@ -323,17 +318,16 @@ def test_simple_pass(): """ ) - # Set up mocks and environment (including retry env vars) - with patch( - "ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client_settings() - ), setup_standard_mocks(): - # Set all environment variables via monkeypatch + # Set up mocks for subprocess execution + setup_basic_subprocess_mocks(pytester) - monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_ENABLED", "true") - monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") - monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5") + # Set retry-related environment variables + pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_ENABLED", "true") + pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") + pytester._monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5") - result = pytester.runpytest("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run tests in subprocess + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") # Tests should pass assert result.ret == 0 From 7871b0444d837c18379cc9ea3e0d6887e2fb0ab9 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 16:39:07 +0200 Subject: [PATCH 02/20] subprocess and in-process pytester --- tests/mock_setup.py | 148 +++++++++++++++++ tests/subprocess_mocks.py | 333 ++++++++++++++++++++++++++++---------- tests/test_integration.py | 81 ++++------ 3 files changed, 424 insertions(+), 138 deletions(-) create mode 100644 tests/mock_setup.py diff --git a/tests/mock_setup.py b/tests/mock_setup.py new file mode 100644 index 0000000..c22e1e5 --- /dev/null +++ b/tests/mock_setup.py 
@@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +"""Shared mock setup logic for both subprocess and in-process testing modes. + +This module contains the actual mock setup functions that are used by both: +1. Subprocess mode: imported by generated conftest.py +2. In-process mode: imported directly by test code + +This approach ensures coverage tracking and eliminates code duplication. +""" + +import typing as t +from unittest.mock import Mock +from unittest.mock import patch + +from ddtestopt.internal.api_client import AutoTestRetriesSettings +from ddtestopt.internal.api_client import EarlyFlakeDetectionSettings +from ddtestopt.internal.api_client import Settings +from ddtestopt.internal.api_client import TestManagementSettings + + +class MockConfig: + """Configuration object for mock setup.""" + + def __init__(self, api_client_config: t.Dict[str, t.Any], skippable_items: t.Set[t.Any], known_tests: t.Set[t.Any]): + self.api_client_config = api_client_config + self.skippable_items = skippable_items + self.known_tests = known_tests + + +def create_mock_objects(config: MockConfig) -> t.Dict[str, t.Any]: + """Create all mock objects based on configuration. + + Returns: + Dictionary containing all mock objects + """ + # Create mock git instance + mock_git_instance = Mock() + mock_git_instance.get_latest_commits.return_value = [] + mock_git_instance.get_filtered_revisions.return_value = [] + mock_git_instance.pack_objects.return_value = iter([]) + + # Create mock writer + mock_writer = Mock() + mock_writer.flush.return_value = None + mock_writer._send_events.return_value = None + + # Create mock backend connector + mock_connector = Mock() + mock_connector.post_json.return_value = (Mock(), {}) + mock_connector.request.return_value = (Mock(), {}) + mock_connector.post_files.return_value = (Mock(), {}) + + # Create API client mock + mock_api_client = Mock() + mock_api_client.get_settings.return_value = Settings( + early_flake_detection=EarlyFlakeDetectionSettings( + enabled=config.api_client_config.get("efd_enabled", False), + slow_test_retries_5s=3, + slow_test_retries_10s=2, + slow_test_retries_30s=1, + slow_test_retries_5m=1, + faulty_session_threshold=30, + ), + test_management=TestManagementSettings(enabled=config.api_client_config.get("test_management_enabled", False)), + auto_test_retries=AutoTestRetriesSettings(enabled=config.api_client_config.get("auto_retries_enabled", False)), + known_tests_enabled=config.api_client_config.get("known_tests_enabled", False), + coverage_enabled=False, + skipping_enabled=config.api_client_config.get("skipping_enabled", False), + require_git=False, + itr_enabled=config.api_client_config.get("skipping_enabled", False), + ) + + mock_api_client.get_known_tests.return_value = config.known_tests + mock_api_client.get_test_management_properties.return_value = {} + mock_api_client.get_known_commits.return_value = [] + mock_api_client.send_git_pack_file.return_value = None + mock_api_client.get_skippable_tests.return_value = ( + config.skippable_items, + "correlation-123" if config.skippable_items else None, + ) + + return { + "mock_git_instance": mock_git_instance, + "mock_writer": mock_writer, + "mock_connector": mock_connector, + "mock_api_client": mock_api_client, + } + + +def create_patchers(mock_objects: t.Dict[str, t.Any]) -> t.List[t.Any]: + """Create all patch objects. 
+ + Args: + mock_objects: Dictionary of mock objects from create_mock_objects() + + Returns: + List of patcher objects + """ + patchers = [ + patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_objects["mock_api_client"]), + patch("ddtestopt.internal.session_manager.get_git_tags", return_value={}), + patch("ddtestopt.internal.session_manager.get_platform_tags", return_value={}), + patch("ddtestopt.internal.session_manager.Git", return_value=mock_objects["mock_git_instance"]), + patch("ddtestopt.internal.http.BackendConnector", return_value=mock_objects["mock_connector"]), + patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_objects["mock_writer"]), + patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_objects["mock_writer"]), + ] + return patchers + + +def setup_mocks_for_subprocess(config: MockConfig) -> None: + """Set up mocks for subprocess execution (called from conftest.py). + + This function starts all patches and leaves them running for the subprocess. + """ + mock_objects = create_mock_objects(config) + patchers = create_patchers(mock_objects) + + # Start all patches for subprocess mode + for patcher in patchers: + patcher.start() + + +def setup_mocks_for_in_process(config: MockConfig) -> t.ContextManager[None]: + """Set up mocks for in-process execution. + + Returns: + Context manager that manages patch lifecycle + """ + from contextlib import contextmanager + + @contextmanager + def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: + mock_objects = create_mock_objects(config) + patchers = create_patchers(mock_objects) + + # Start all patches + for patcher in patchers: + patcher.start() + + try: + yield + finally: + # Stop all patches + for patcher in patchers: + patcher.stop() + + return _mock_context() diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py index 7adc9e5..d0822e0 100644 --- a/tests/subprocess_mocks.py +++ b/tests/subprocess_mocks.py @@ -1,14 +1,18 @@ #!/usr/bin/env python3 -"""Subprocess mocking utilities for test_integration.py refactoring. +"""Unified mocking utilities for test_integration.py. -This module provides utilities to set up mocks in pytest subprocesses when using -pytester.runpytest_subprocess(). It works by: +This module provides utilities to set up mocks for both subprocess and in-process +pytest execution modes. It supports: -1. Serializing mock configuration to environment variables -2. Creating a conftest.py file that reads these variables and sets up mocks -3. Providing helper functions to generate the necessary conftest content +1. Subprocess mode: Serializing mock configuration to environment variables and + creating a conftest.py file that sets up mocks in the subprocess +2. In-process mode: Using traditional context managers for mocking + +The interface is designed to be mode-agnostic, allowing tests to switch between +execution modes with minimal changes. 
""" +from contextlib import contextmanager import json import typing as t @@ -143,13 +147,12 @@ def serialize_mock_config(config: SubprocessMockConfig) -> t.Dict[str, str]: def generate_conftest_content() -> str: - """Generate conftest.py content for subprocess mocking.""" + """Generate conftest.py content for subprocess mocking using importable modules.""" return '''#!/usr/bin/env python3 """Auto-generated conftest.py for subprocess mocking.""" import json import os -from unittest.mock import Mock, patch import pytest # Import the mock utilities we need @@ -162,10 +165,7 @@ def generate_conftest_content() -> str: sys.path.insert(0, str(test_dir)) from ddtestopt.internal.test_data import ModuleRef, SuiteRef, TestRef -from ddtestopt.internal.api_client import AutoTestRetriesSettings -from ddtestopt.internal.api_client import EarlyFlakeDetectionSettings -from ddtestopt.internal.api_client import Settings -from ddtestopt.internal.api_client import TestManagementSettings +from tests.mock_setup import MockConfig, setup_mocks_for_subprocess def _deserialize_test_ref(data): @@ -182,7 +182,7 @@ def _deserialize_suite_ref(data): def _setup_subprocess_mocks(): - """Set up mocks based on environment variables.""" + """Set up mocks based on environment variables using importable module.""" if not os.getenv('DDTESTOPT_SUBPROCESS_MOCKING'): return @@ -205,69 +205,9 @@ def _setup_subprocess_mocks(): known_tests_data = json.loads(known_tests_str) known_tests = {_deserialize_test_ref(test_data) for test_data in known_tests_data} - # Create mock API client - mock_api_client = Mock() - mock_api_client.get_settings.return_value = Settings( - early_flake_detection=EarlyFlakeDetectionSettings( - enabled=api_config.get('efd_enabled', False), - slow_test_retries_5s=3, - slow_test_retries_10s=2, - slow_test_retries_30s=1, - slow_test_retries_5m=1, - faulty_session_threshold=30, - ), - test_management=TestManagementSettings(enabled=api_config.get('test_management_enabled', False)), - auto_test_retries=AutoTestRetriesSettings(enabled=api_config.get('auto_retries_enabled', False)), - known_tests_enabled=api_config.get('known_tests_enabled', False), - coverage_enabled=False, - skipping_enabled=api_config.get('skipping_enabled', False), - require_git=False, - itr_enabled=api_config.get('skipping_enabled', False), - ) - - mock_api_client.get_known_tests.return_value = known_tests - mock_api_client.get_test_management_properties.return_value = {} - mock_api_client.get_known_commits.return_value = [] - mock_api_client.send_git_pack_file.return_value = None - mock_api_client.get_skippable_tests.return_value = ( - skippable_items, - "correlation-123" if skippable_items else None, - ) - - # Create mock git instance - mock_git_instance = Mock() - mock_git_instance.get_latest_commits.return_value = [] - mock_git_instance.get_filtered_revisions.return_value = [] - mock_git_instance.pack_objects.return_value = iter([]) - - # Create mock writer - mock_writer = Mock() - mock_writer.flush.return_value = None - mock_writer._send_events.return_value = None - - # Create mock backend connector - mock_connector = Mock() - mock_connector.post_json.return_value = (Mock(), {}) - mock_connector.request.return_value = (Mock(), {}) - mock_connector.post_files.return_value = (Mock(), {}) - - # Apply all the patches - patcher1 = patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client) - patcher2 = patch("ddtestopt.internal.session_manager.get_git_tags", return_value={}) - patcher3 = 
patch("ddtestopt.internal.session_manager.get_platform_tags", return_value={}) - patcher4 = patch("ddtestopt.internal.session_manager.Git", return_value=mock_git_instance) - patcher5 = patch("ddtestopt.internal.http.BackendConnector", return_value=mock_connector) - patcher6 = patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_writer) - patcher7 = patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_writer) - - # Start all patches - patcher1.start() - patcher2.start() - patcher3.start() - patcher4.start() - patcher5.start() - patcher6.start() - patcher7.start() + # Create configuration object and set up mocks using importable module + config = MockConfig(api_config, skippable_items, known_tests) + setup_mocks_for_subprocess(config) # Set up mocks as early as possible @@ -318,30 +258,245 @@ def setup_subprocess_environment(pytester: Pytester, config: SubprocessMockConfi pytester.makeconftest(generate_conftest_content()) -# Convenience functions for common test scenarios +# ============================================================================= +# SHARED MOCK SETUP LOGIC (Using importable modules) +# ============================================================================= +# Mock setup logic is now in mock_setup.py for better coverage tracking -def setup_basic_subprocess_mocks(pytester: Pytester) -> None: + +# ============================================================================= +# IN-PROCESS MOCKING SYSTEM (Uses shared logic) +# ============================================================================= + + +@contextmanager +def _setup_in_process_mocks(config: SubprocessMockConfig) -> t.Generator[t.Any, t.Any, t.Any]: + """Set up mocks for in-process testing using importable module.""" + from tests.mock_setup import MockConfig + from tests.mock_setup import setup_mocks_for_in_process + + # Convert SubprocessMockConfig to MockConfig + mock_config = MockConfig( + api_client_config=config.api_client_config, + skippable_items=config.skippable_items, + known_tests=config.known_tests, + ) + + # Use the importable module for mock setup + with setup_mocks_for_in_process(mock_config): + yield + + +# ============================================================================= +# UNIFIED INTERFACE - SUPPORTS BOTH MODES +# ============================================================================= + + +def as_bool(val: str) -> bool: + return val.strip().lower() in ("true", "1") + + +def get_subprocess_test_mode() -> bool: + """Get the test execution mode from environment variable or ddtrace plugin detection. + + Auto-detection logic: + 1. If _DDTESTOPT_SUBPROCESS_TEST_MODE is explicitly set, use that value + 2. If ddtrace pytest plugin is active, use subprocess mode for isolation + 3. Otherwise, default to in-process mode for speed + + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=true to force subprocess execution. + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=false to force in-process execution. 
+ """ + import os + + # Check for explicit environment variable first + env_val = os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE") + if env_val is not None: + return as_bool(env_val) + + # Auto-detect based on ddtrace pytest plugin being active + try: + # Simple approach: check if the ddtrace pytest plugin is imported and active + # We look for the actual ddtrace plugin module being loaded + import sys + + # Check if ddtrace plugin is loaded but not disabled + if "ddtrace.contrib.pytest.plugin" in sys.modules: + # The plugin module is loaded, now check if it's active + # Look at sys.argv to see if ddtrace was explicitly disabled + import sys + + cmdline = " ".join(sys.argv) + if "-p no:ddtrace" in cmdline or "--no-ddtrace" in cmdline: + return False # Explicitly disabled + return True # Plugin loaded and not disabled + + except (ImportError, AttributeError): + pass + + # Default to in-process mode for faster execution when ddtrace plugin not detected + return False + + +def setup_test_mocks( + pytester: Pytester, subprocess_mode: t.Optional[bool] = None, **config_kwargs: t.Any +) -> t.Optional[t.ContextManager[None]]: + """Unified interface for setting up test mocks. + + Args: + pytester: The pytest Pytester instance + mode: Either "subprocess" or "in-process". If None, uses DDTESTOPT_TEST_MODE env var + **config_kwargs: Configuration options for the mocks + + Returns: + None for subprocess mode, context manager for in-process mode + + Example: + # Subprocess mode (new) + setup_test_mocks(pytester, mode="subprocess") + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-v") + + # In-process mode (original) + with setup_test_mocks(pytester, mode="in-process"): + result = pytester.runpytest("-p", "ddtestopt", "-v") + + # Environment-controlled mode + # Set DDTESTOPT_TEST_MODE=in-process before running tests + context = setup_test_mocks(pytester) # Uses env var + if context: + with context: + result = pytester.runpytest("-p", "ddtestopt", "-v") + else: + result = pytester.runpytest_subprocess("-p", "ddtestopt", "-v") + """ + if subprocess_mode is None: + subprocess_mode = get_subprocess_test_mode() + + config = create_subprocess_mock_config(**config_kwargs) + + if subprocess_mode: + setup_subprocess_environment(pytester, config) + return None + + return _setup_in_process_mocks(config) + + +# ============================================================================= +# CONVENIENCE FUNCTIONS - SUPPORT BOTH MODES +# ============================================================================= + + +def setup_basic_mocks( + pytester: Pytester, subprocess_mode: t.Optional[bool] = None +) -> t.Optional[t.ContextManager[None]]: """Set up basic mocks for simple test execution.""" - config = create_subprocess_mock_config() - setup_subprocess_environment(pytester, config) + return setup_test_mocks(pytester, subprocess_mode=subprocess_mode) -def setup_retry_subprocess_mocks(pytester: Pytester) -> None: +def setup_retry_mocks( + pytester: Pytester, subprocess_mode: t.Optional[bool] = None +) -> t.Optional[t.ContextManager[None]]: """Set up mocks for auto retry functionality testing.""" - config = create_subprocess_mock_config(auto_retries_enabled=True) - setup_subprocess_environment(pytester, config) + return setup_test_mocks(pytester, subprocess_mode=subprocess_mode, auto_retries_enabled=True) -def setup_efd_subprocess_mocks(pytester: Pytester, known_tests: t.Optional[t.Set[TestRef]] = None) -> None: +def setup_efd_mocks( + pytester: Pytester, subprocess_mode: t.Optional[bool] = None, known_tests: 
+) -> t.Optional[t.ContextManager[None]]:
+    """Set up mocks for Early Flake Detection testing."""
+    return setup_test_mocks(
+        pytester,
+        subprocess_mode=subprocess_mode,
+        efd_enabled=True,
+        known_tests_enabled=True,
+        known_tests=known_tests or set(),
+    )
+
+
+def setup_itr_mocks(
+    pytester: Pytester,
+    subprocess_mode: t.Optional[bool] = None,
+    skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None,
+) -> t.Optional[t.ContextManager[None]]:
+    """Set up mocks for Intelligent Test Runner testing."""
+    return setup_test_mocks(
+        pytester, subprocess_mode=subprocess_mode, skipping_enabled=True, skippable_items=skippable_items or set()
+    )
+
+
+# =============================================================================
+# UTILITY FUNCTIONS FOR DUAL-MODE EXECUTION
+# =============================================================================
+
+
+def run_test_with_mocks(
+    pytester: Pytester, pytest_args: t.List[str], subprocess_mode: t.Optional[bool] = None, **mock_config: t.Any
+) -> t.Any:
+    """Run a test with appropriate mocking based on the mode.
+
+    This utility function handles the conditional execution pattern:
+    - For subprocess mode: sets up mocks and runs runpytest_subprocess()
+    - For in-process mode: uses context manager and runs runpytest()
+
+    Args:
+        pytester: The pytest Pytester instance
+        pytest_args: Arguments to pass to pytest
+        subprocess_mode: Test execution mode (if None, auto-detected from the environment)
+        **mock_config: Mock configuration options
+
+    Returns:
+        The result from pytester.runpytest() or pytester.runpytest_subprocess()
+
+    Example:
+        # Simple usage - mode determined by environment
+        result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-v"])
+
+        # With specific configuration
+        result = run_test_with_mocks(
+            pytester,
+            ["-p", "ddtestopt", "-v"],
+            auto_retries_enabled=True
+        )
+    """
+    if subprocess_mode is None:
+        subprocess_mode = get_subprocess_test_mode()
+
+    context = setup_test_mocks(pytester, subprocess_mode=subprocess_mode, **mock_config)
+
+    if context is not None:
+        # In-process mode
+        with context:
+            return pytester.runpytest(*pytest_args)
+    else:
+        # Subprocess mode
+        return pytester.runpytest_subprocess(*pytest_args)
+
+
+# =============================================================================
+# BACKWARD COMPATIBILITY - SUBPROCESS-ONLY FUNCTIONS
+# =============================================================================
+
+
+def setup_basic_subprocess_mocks(pytester: Pytester) -> None:
+    """Set up basic mocks for simple test execution (subprocess only)."""
+    setup_test_mocks(pytester, subprocess_mode=True)
+
+
+def setup_retry_subprocess_mocks(pytester: Pytester) -> None:
+    """Set up mocks for auto retry functionality testing (subprocess only)."""
+    setup_test_mocks(pytester, subprocess_mode=True, auto_retries_enabled=True)
+
+
+def setup_efd_subprocess_mocks(pytester: Pytester, known_tests: t.Optional[t.Set[TestRef]] = None) -> None:
+    """Set up mocks for Early Flake Detection testing (subprocess only)."""
+    setup_test_mocks(
+        pytester, subprocess_mode=True, efd_enabled=True, known_tests_enabled=True, known_tests=known_tests or set()
+    )
 
 
 def setup_itr_subprocess_mocks(
     pytester: Pytester, skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None
 ) -> None:
-    """Set up mocks for Intelligent Test Runner testing."""
testing.""" - config = create_subprocess_mock_config(skipping_enabled=True, skippable_items=skippable_items or set()) - setup_subprocess_environment(pytester, config) + """Set up mocks for Intelligent Test Runner testing (subprocess only).""" + setup_test_mocks(pytester, subprocess_mode=True, skipping_enabled=True, skippable_items=skippable_items or set()) diff --git a/tests/test_integration.py b/tests/test_integration.py index 9d93861..45ce537 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -15,10 +15,7 @@ from ddtestopt.internal.test_data import TestSession from tests.mocks import mock_api_client_settings from tests.mocks import setup_standard_mocks -from tests.subprocess_mocks import setup_basic_subprocess_mocks -from tests.subprocess_mocks import setup_efd_subprocess_mocks -from tests.subprocess_mocks import setup_itr_subprocess_mocks -from tests.subprocess_mocks import setup_retry_subprocess_mocks +from tests.subprocess_mocks import run_test_with_mocks # Functions moved to tests.mocks for centralization @@ -39,11 +36,8 @@ def test_simple(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection (subprocess by default, in-process if ddtrace not loaded) + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) # Test should pass assert result.ret == 0 @@ -65,14 +59,13 @@ def test_passes(): """ ) - # Set up mocks for subprocess execution with retry functionality - setup_retry_subprocess_mocks(pytester) - # Set retry-related environment variables pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run test with automatic mode detection and retry configuration + result = run_test_with_mocks( + pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], auto_retries_enabled=True + ) # Check that the test failed after retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -120,11 +113,14 @@ def test_known_test(): known_suite = SuiteRef(ModuleRef("."), "test_efd.py") known_test_ref = TestRef(known_suite, "test_known_test") - # Set up mocks for subprocess execution with EFD enabled - setup_efd_subprocess_mocks(pytester, known_tests={known_test_ref}) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run test with automatic mode detection and EFD configuration + result = run_test_with_mocks( + pytester, + ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], + efd_enabled=True, + known_tests_enabled=True, + known_tests={known_test_ref}, + ) # Check that the test failed after EFD retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -171,11 +167,13 @@ def test_should_run(): skippable_suite = SuiteRef(ModuleRef("."), "test_itr.py") skippable_test_ref = TestRef(skippable_suite, "test_should_be_skipped") - # Set up mocks for subprocess execution with ITR enabled - setup_itr_subprocess_mocks(pytester, skippable_items={skippable_test_ref}) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s") + # Run test with automatic mode detection and ITR configuration + result = run_test_with_mocks( + pytester, + ["-p", "ddtestopt", "-p", 
"no:ddtrace", "-v", "-s"], + skipping_enabled=True, + skippable_items={skippable_test_ref}, + ) # Check that tests completed successfully assert result.ret == 0 # Exit code 0 indicates success @@ -216,11 +214,8 @@ def test_with_assertion(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) # Check that tests ran successfully assert result.ret == 0 @@ -242,11 +237,8 @@ def test_passing(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) # Check that one test failed and one passed assert result.ret == 1 # pytest exits with 1 when tests fail @@ -264,11 +256,8 @@ def test_plugin_loaded(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v") + # Run test with automatic mode detection + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v"]) # Should run without plugin loading errors assert result.ret == 0 @@ -290,11 +279,8 @@ def test_command_extraction(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - - # Run with specific arguments that should be captured in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v") + # Run test with automatic mode detection + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v"]) assert result.ret == 0 result.assert_outcomes(passed=1) @@ -318,16 +304,13 @@ def test_simple_pass(): """ ) - # Set up mocks for subprocess execution - setup_basic_subprocess_mocks(pytester) - # Set retry-related environment variables pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_ENABLED", "true") pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") pytester._monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5") - # Run tests in subprocess - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-p", "no:ddtrace", "-v") + # Run test with automatic mode detection + result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) # Tests should pass assert result.ret == 0 From d79adb421a4d719ead18177d9e3c7b70d7d0069e Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 17:05:37 +0200 Subject: [PATCH 03/20] small refactor --- tests/mock_setup.py | 71 +++++++++++++++------------------------ tests/subprocess_mocks.py | 18 ++++++++-- 2 files changed, 43 insertions(+), 46 deletions(-) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index c22e1e5..c9f3d27 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -6,17 +6,13 @@ 2. In-process mode: imported directly by test code This approach ensures coverage tracking and eliminates code duplication. +Now uses builders from mocks.py for consistent mock creation. 
""" import typing as t from unittest.mock import Mock from unittest.mock import patch -from ddtestopt.internal.api_client import AutoTestRetriesSettings -from ddtestopt.internal.api_client import EarlyFlakeDetectionSettings -from ddtestopt.internal.api_client import Settings -from ddtestopt.internal.api_client import TestManagementSettings - class MockConfig: """Configuration object for mock setup.""" @@ -28,56 +24,43 @@ def __init__(self, api_client_config: t.Dict[str, t.Any], skippable_items: t.Set def create_mock_objects(config: MockConfig) -> t.Dict[str, t.Any]: - """Create all mock objects based on configuration. + """Create all mock objects based on configuration using builders from mocks.py. Returns: Dictionary containing all mock objects """ - # Create mock git instance - mock_git_instance = Mock() - mock_git_instance.get_latest_commits.return_value = [] - mock_git_instance.get_filtered_revisions.return_value = [] - mock_git_instance.pack_objects.return_value = iter([]) + # Import builders from mocks.py to avoid import cycles at module level + from tests.mocks import APIClientMockBuilder + from tests.mocks import BackendConnectorMockBuilder + from tests.mocks import get_mock_git_instance + + # Create mock git instance using existing helper + mock_git_instance = get_mock_git_instance() - # Create mock writer + # Create mock writer (simple mock, no builder needed for this) mock_writer = Mock() mock_writer.flush.return_value = None mock_writer._send_events.return_value = None - # Create mock backend connector - mock_connector = Mock() - mock_connector.post_json.return_value = (Mock(), {}) - mock_connector.request.return_value = (Mock(), {}) - mock_connector.post_files.return_value = (Mock(), {}) - - # Create API client mock - mock_api_client = Mock() - mock_api_client.get_settings.return_value = Settings( - early_flake_detection=EarlyFlakeDetectionSettings( - enabled=config.api_client_config.get("efd_enabled", False), - slow_test_retries_5s=3, - slow_test_retries_10s=2, - slow_test_retries_30s=1, - slow_test_retries_5m=1, - faulty_session_threshold=30, - ), - test_management=TestManagementSettings(enabled=config.api_client_config.get("test_management_enabled", False)), - auto_test_retries=AutoTestRetriesSettings(enabled=config.api_client_config.get("auto_retries_enabled", False)), - known_tests_enabled=config.api_client_config.get("known_tests_enabled", False), - coverage_enabled=False, - skipping_enabled=config.api_client_config.get("skipping_enabled", False), - require_git=False, - itr_enabled=config.api_client_config.get("skipping_enabled", False), + # Create mock backend connector using builder + mock_connector = BackendConnectorMockBuilder().build() + + # Create API client mock using builder with configuration + api_builder = APIClientMockBuilder() + + api_builder.with_skipping_enabled( + enabled=config.api_client_config.get("skipping_enabled", False) + ).with_auto_retries(enabled=config.api_client_config.get("auto_retries_enabled", False)).with_early_flake_detection( + enabled=config.api_client_config.get("efd_enabled", False) + ).with_test_management( + enabled=config.api_client_config.get("test_management_enabled", False) + ).with_known_tests( + enabled=config.api_client_config.get("known_tests_enabled", False), tests=config.known_tests + ).with_skippable_items( + config.skippable_items ) - mock_api_client.get_known_tests.return_value = config.known_tests - mock_api_client.get_test_management_properties.return_value = {} - mock_api_client.get_known_commits.return_value = [] - 
mock_api_client.send_git_pack_file.return_value = None - mock_api_client.get_skippable_tests.return_value = ( - config.skippable_items, - "correlation-123" if config.skippable_items else None, - ) + mock_api_client = api_builder.build() return { "mock_git_instance": mock_git_instance, diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py index d0822e0..207be23 100644 --- a/tests/subprocess_mocks.py +++ b/tests/subprocess_mocks.py @@ -10,6 +10,10 @@ The interface is designed to be mode-agnostic, allowing tests to switch between execution modes with minimal changes. + +CONSOLIDATION NOTE: This module now uses builders from tests/mocks.py internally +via tests/mock_setup.py to eliminate code duplication and ensure consistent +mock behavior across all test modes. """ from contextlib import contextmanager @@ -98,14 +102,24 @@ def _serialize_suite_ref(suite_ref: SuiteRef) -> t.Dict[str, str]: def _deserialize_test_ref(data: t.Dict[str, str]) -> TestRef: - """Deserialize a TestRef from a dictionary.""" + """Deserialize a TestRef from a dictionary. + + Note: This function is duplicated in the generated conftest.py content + for subprocess mocking. The duplication is necessary because conftest.py + needs self-contained deserialization functions. + """ module_ref = ModuleRef(data["module_name"]) suite_ref = SuiteRef(module_ref, data["suite_name"]) return TestRef(suite_ref, data["test_name"]) def _deserialize_suite_ref(data: t.Dict[str, str]) -> SuiteRef: - """Deserialize a SuiteRef from a dictionary.""" + """Deserialize a SuiteRef from a dictionary. + + Note: This function is duplicated in the generated conftest.py content + for subprocess mocking. The duplication is necessary because conftest.py + needs self-contained deserialization functions. 
+ """ module_ref = ModuleRef(data["module_name"]) return SuiteRef(module_ref, data["suite_name"]) From 230274c08b6da0176cf3abb1e8dc7f36471d2508 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 17:07:11 +0200 Subject: [PATCH 04/20] cov doesn't load ddtrace --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 741711b..c266737 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,8 +52,8 @@ matrix.pytest.dependencies = [ [tool.hatch.envs.hatch-test.scripts] run = "pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" -run-cov = "python -m slipcover --source ddtestopt/internal --pretty-print -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" -cov-html = "python -m slipcover --source ddtestopt --html --out slipcover.html -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" +run-cov = "python -m slipcover --source ddtestopt/internal --pretty-print -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt -p no:ddtrace {args:tests}" +cov-html = "python -m slipcover --source ddtestopt --html --out slipcover.html -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt -p no:ddtrace {args:tests}" cov-combine = "true" cov-report = "true" From 93b490cd31bdf266269dcad4adb2e4c346392fff Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 17:27:35 +0200 Subject: [PATCH 05/20] simplify code --- tests/subprocess_mocks.py | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py index 207be23..cba1e28 100644 --- a/tests/subprocess_mocks.py +++ b/tests/subprocess_mocks.py @@ -18,6 +18,8 @@ from contextlib import contextmanager import json +import os +import sys import typing as t from _pytest.pytester import Pytester @@ -322,34 +324,20 @@ def get_subprocess_test_mode() -> bool: Set _DDTESTOPT_SUBPROCESS_TEST_MODE=true to force subprocess execution. Set _DDTESTOPT_SUBPROCESS_TEST_MODE=false to force in-process execution. 
""" - import os - # Check for explicit environment variable first env_val = os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE") if env_val is not None: return as_bool(env_val) - # Auto-detect based on ddtrace pytest plugin being active - try: - # Simple approach: check if the ddtrace pytest plugin is imported and active - # We look for the actual ddtrace plugin module being loaded - import sys - - # Check if ddtrace plugin is loaded but not disabled - if "ddtrace.contrib.pytest.plugin" in sys.modules: - # The plugin module is loaded, now check if it's active - # Look at sys.argv to see if ddtrace was explicitly disabled - import sys - - cmdline = " ".join(sys.argv) - if "-p no:ddtrace" in cmdline or "--no-ddtrace" in cmdline: - return False # Explicitly disabled - return True # Plugin loaded and not disabled - - except (ImportError, AttributeError): - pass + # Check if the ddtrace pytest plugin is imported and active + # Check if ddtrace plugin is loaded but not disabled + if "ddtrace" in sys.modules: + # The plugin module is loaded, now check if it's active + # Look at sys.argv to see if ddtrace was explicitly disabled + cmdline = " ".join(sys.argv) + return "--ddtrace" in cmdline - # Default to in-process mode for faster execution when ddtrace plugin not detected + # Default to in-process mode return False From 606cf2fd74c78ca0961ed985bd5f92f8f6779939 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 17:38:53 +0200 Subject: [PATCH 06/20] mocks side effects --- tests/mocks.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/mocks.py b/tests/mocks.py index 9c501f5..f16c0a9 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -415,9 +415,8 @@ def with_request_response(self, method: str, path: str, response_data: t.Any) -> def build(self) -> Mock: """Build the BackendConnector mock.""" - from ddtestopt.internal.http import BackendConnector - - mock_connector = Mock(spec=BackendConnector) + # Create a simple Mock without spec to avoid CI environment issues + mock_connector = Mock() # Mock methods to prevent real HTTP calls def mock_post_json(endpoint: str, data: t.Any) -> t.Tuple[Mock, t.Any]: @@ -434,9 +433,9 @@ def mock_request(method: str, path: str, **kwargs: t.Any) -> t.Tuple[Mock, t.Any def mock_post_files(path: str, files: t.Any, **kwargs: t.Any) -> t.Tuple[Mock, t.Dict[str, t.Any]]: return Mock(), {} - mock_connector.post_json.side_effect = mock_post_json - mock_connector.request.side_effect = mock_request - mock_connector.post_files.side_effect = mock_post_files + mock_connector.post_json = Mock(side_effect=mock_post_json) + mock_connector.request = Mock(side_effect=mock_request) + mock_connector.post_files = Mock(side_effect=mock_post_files) return mock_connector From 0bf706333cda98f6009692f135cf1c4491bd138f Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 17:55:09 +0200 Subject: [PATCH 07/20] check also pytest_addopts --- tests/subprocess_mocks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py index cba1e28..a36c390 100644 --- a/tests/subprocess_mocks.py +++ b/tests/subprocess_mocks.py @@ -335,7 +335,8 @@ def get_subprocess_test_mode() -> bool: # The plugin module is loaded, now check if it's active # Look at sys.argv to see if ddtrace was explicitly disabled cmdline = " ".join(sys.argv) - return "--ddtrace" in cmdline + pytest_addopts = os.getenv("PYTEST_ADDOPTS", "") + return "--ddtrace" in cmdline or "--ddtrace" in pytest_addopts # Default to 
in-process mode return False From 936378062584dea984accdebd8a32dacdc28546d Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 18:02:52 +0200 Subject: [PATCH 08/20] explicitly enable subprocess mode --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fbacfde..fd1797e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,6 +59,7 @@ jobs: - name: Run tests env: PYTEST_ADDOPTS: '-v --ddtrace --ignore=ddtestopt' + _DDTESTOPT_SUBPROCESS_TEST_MODE: "true" DD_API_KEY: ${{ secrets.DD_API_KEY }} DD_CIVISIBILITY_AGENTLESS_ENABLED: '1' run: hatch test -i py=${{ matrix.python-version }} -i pytest=${{ matrix.pytest-version}} From 7a18183ead78354ccc4e2f77339b7689715cea1c Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Thu, 25 Sep 2025 18:25:29 +0200 Subject: [PATCH 09/20] some cleanup --- pyproject.toml | 3 - tests/subprocess_mocks.py | 172 +++++++++++++++----------------------- 2 files changed, 68 insertions(+), 107 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c266737..9f28fb5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,9 +34,6 @@ parallel = true retries = 2 retry-delay = 1 -[tool.hatch.envs.hatch-test.env-vars] -_DD_CIVISIBILITY_ITR_FORCE_ENABLE_COVERAGE = "true" - [[tool.hatch.envs.hatch-test.matrix]] python = ["3.13", "3.12"] pytest = ["8.1.2", "8.4.*"] diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py index a36c390..a319b51 100644 --- a/tests/subprocess_mocks.py +++ b/tests/subprocess_mocks.py @@ -73,14 +73,16 @@ def with_known_tests( self.known_tests = tests return self - def with_skippable_items(self, items: t.Set[t.Union[TestRef, SuiteRef]]) -> "SubprocessMockConfig": + def with_skippable_items(self, items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]]) -> "SubprocessMockConfig": """Set skippable test items.""" - self.skippable_items = items + if items is not None: + self.skippable_items = items return self - def with_environment_vars(self, env_vars: t.Dict[str, str]) -> "SubprocessMockConfig": + def with_environment_vars(self, env_vars: t.Optional[t.Dict[str, str]]) -> "SubprocessMockConfig": """Set additional environment variables.""" - self.environment_vars.update(env_vars) + if env_vars is not None: + self.environment_vars.update(env_vars) return self @@ -103,27 +105,28 @@ def _serialize_suite_ref(suite_ref: SuiteRef) -> t.Dict[str, str]: } -def _deserialize_test_ref(data: t.Dict[str, str]) -> TestRef: - """Deserialize a TestRef from a dictionary. +# ## DEV: Used only inside subprocess (conftest.py) +# def _deserialize_test_ref(data: t.Dict[str, str]) -> TestRef: +# """Deserialize a TestRef from a dictionary. - Note: This function is duplicated in the generated conftest.py content - for subprocess mocking. The duplication is necessary because conftest.py - needs self-contained deserialization functions. - """ - module_ref = ModuleRef(data["module_name"]) - suite_ref = SuiteRef(module_ref, data["suite_name"]) - return TestRef(suite_ref, data["test_name"]) +# Note: This function is duplicated in the generated conftest.py content +# for subprocess mocking. The duplication is necessary because conftest.py +# needs self-contained deserialization functions. +# """ +# module_ref = ModuleRef(data["module_name"]) +# suite_ref = SuiteRef(module_ref, data["suite_name"]) +# return TestRef(suite_ref, data["test_name"]) -def _deserialize_suite_ref(data: t.Dict[str, str]) -> SuiteRef: - """Deserialize a SuiteRef from a dictionary. 
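+# For orientation, the round trip these helpers preserve (a sketch; assumes
+# the ref types compare by value):
+#
+#     suite = SuiteRef(ModuleRef("."), "test_example.py")
+#     data = _serialize_test_ref(TestRef(suite, "test_something"))
+#     assert data["type"] == "TestRef"
+#     assert _deserialize_test_ref(data) == TestRef(suite, "test_something")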
+# def _deserialize_suite_ref(data: t.Dict[str, str]) -> SuiteRef: +# """Deserialize a SuiteRef from a dictionary. - Note: This function is duplicated in the generated conftest.py content - for subprocess mocking. The duplication is necessary because conftest.py - needs self-contained deserialization functions. - """ - module_ref = ModuleRef(data["module_name"]) - return SuiteRef(module_ref, data["suite_name"]) +# Note: This function is duplicated in the generated conftest.py content +# for subprocess mocking. The duplication is necessary because conftest.py +# needs self-contained deserialization functions. +# """ +# module_ref = ModuleRef(data["module_name"]) +# return SuiteRef(module_ref, data["suite_name"]) def serialize_mock_config(config: SubprocessMockConfig) -> t.Dict[str, str]: @@ -243,20 +246,17 @@ def create_subprocess_mock_config(**kwargs: t.Any) -> SubprocessMockConfig: config = SubprocessMockConfig() # Apply any provided configuration - if "skipping_enabled" in kwargs: - config.with_skipping_enabled(kwargs["skipping_enabled"]) - if "auto_retries_enabled" in kwargs: - config.with_auto_retries_enabled(kwargs["auto_retries_enabled"]) - if "efd_enabled" in kwargs: - config.with_early_flake_detection(kwargs["efd_enabled"]) - if "test_management_enabled" in kwargs: - config.with_test_management(kwargs["test_management_enabled"]) - if "known_tests_enabled" in kwargs: - config.with_known_tests(kwargs["known_tests_enabled"], kwargs.get("known_tests")) - if "skippable_items" in kwargs: - config.with_skippable_items(kwargs["skippable_items"]) - if "environment_vars" in kwargs: - config.with_environment_vars(kwargs["environment_vars"]) + config.with_skipping_enabled(kwargs.get("skipping_enabled", False)).with_auto_retries_enabled( + kwargs.get("auto_retries_enabled", False) + ).with_early_flake_detection(kwargs.get("efd_enabled", False)).with_test_management( + kwargs.get("test_management_enabled", False) + ).with_known_tests( + kwargs.get("known_tests_enabled", False), kwargs.get("known_tests") + ).with_skippable_items( + kwargs.get("skippable_items", None) + ).with_environment_vars( + kwargs.get("environment_vars", None) + ) return config @@ -274,13 +274,6 @@ def setup_subprocess_environment(pytester: Pytester, config: SubprocessMockConfi pytester.makeconftest(generate_conftest_content()) -# ============================================================================= -# SHARED MOCK SETUP LOGIC (Using importable modules) -# ============================================================================= - -# Mock setup logic is now in mock_setup.py for better coverage tracking - - # ============================================================================= # IN-PROCESS MOCKING SYSTEM (Uses shared logic) # ============================================================================= @@ -385,47 +378,47 @@ def setup_test_mocks( return _setup_in_process_mocks(config) -# ============================================================================= -# CONVENIENCE FUNCTIONS - SUPPORT BOTH MODES -# ============================================================================= +# # ============================================================================= +# # CONVENIENCE FUNCTIONS - SUPPORT BOTH MODES +# # ============================================================================= -def setup_basic_mocks( - pytester: Pytester, subprocess_mode: t.Optional[bool] = None -) -> t.Optional[t.ContextManager[None]]: - """Set up basic mocks for simple test execution.""" - return 
setup_test_mocks(pytester, subprocess_mode=subprocess_mode) +# def setup_basic_mocks( +# pytester: Pytester, subprocess_mode: t.Optional[bool] = None +# ) -> t.Optional[t.ContextManager[None]]: +# """Set up basic mocks for simple test execution.""" +# return setup_test_mocks(pytester, subprocess_mode=subprocess_mode) -def setup_retry_mocks( - pytester: Pytester, subprocess_mode: t.Optional[bool] = None -) -> t.Optional[t.ContextManager[None]]: - """Set up mocks for auto retry functionality testing.""" - return setup_test_mocks(pytester, subprocess_mode=subprocess_mode, auto_retries_enabled=True) +# def setup_retry_mocks( +# pytester: Pytester, subprocess_mode: t.Optional[bool] = None +# ) -> t.Optional[t.ContextManager[None]]: +# """Set up mocks for auto retry functionality testing.""" +# return setup_test_mocks(pytester, subprocess_mode=subprocess_mode, auto_retries_enabled=True) -def setup_efd_mocks( - pytester: Pytester, subprocess_mode: t.Optional[bool] = None, known_tests: t.Optional[t.Set[TestRef]] = None -) -> t.Optional[t.ContextManager[None]]: - """Set up mocks for Early Flake Detection testing.""" - return setup_test_mocks( - pytester, - subprocess_mode=subprocess_mode, - efd_enabled=True, - known_tests_enabled=True, - known_tests=known_tests or set(), - ) +# def setup_efd_mocks( +# pytester: Pytester, subprocess_mode: t.Optional[bool] = None, known_tests: t.Optional[t.Set[TestRef]] = None +# ) -> t.Optional[t.ContextManager[None]]: +# """Set up mocks for Early Flake Detection testing.""" +# return setup_test_mocks( +# pytester, +# subprocess_mode=subprocess_mode, +# efd_enabled=True, +# known_tests_enabled=True, +# known_tests=known_tests or set(), +# ) -def setup_itr_mocks( - pytester: Pytester, - subprocess_mode: t.Optional[bool] = None, - skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None, -) -> t.Optional[t.ContextManager[None]]: - """Set up mocks for Intelligent Test Runner testing.""" - return setup_test_mocks( - pytester, subprocess_mode=subprocess_mode, skipping_enabled=True, skippable_items=skippable_items or set() - ) +# def setup_itr_mocks( +# pytester: Pytester, +# subprocess_mode: t.Optional[bool] = None, +# skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None, +# ) -> t.Optional[t.ContextManager[None]]: +# """Set up mocks for Intelligent Test Runner testing.""" +# return setup_test_mocks( +# pytester, subprocess_mode=subprocess_mode, skipping_enabled=True, skippable_items=skippable_items or set() +# ) # ============================================================================= @@ -474,32 +467,3 @@ def run_test_with_mocks( else: # Subprocess mode return pytester.runpytest_subprocess(*pytest_args) - - -# ============================================================================= -# BACKWARD COMPATIBILITY - SUBPROCESS-ONLY FUNCTIONS -# ============================================================================= - - -def setup_basic_subprocess_mocks(pytester: Pytester) -> None: - """Set up basic mocks for simple test execution (subprocess only).""" - setup_test_mocks(pytester, subprocess_mode=True) - - -def setup_retry_subprocess_mocks(pytester: Pytester) -> None: - """Set up mocks for auto retry functionality testing (subprocess only).""" - setup_test_mocks(pytester, subprocess_mode=True, auto_retries_enabled=True) - - -def setup_efd_subprocess_mocks(pytester: Pytester, known_tests: t.Optional[t.Set[TestRef]] = None) -> None: - """Set up mocks for Early Flake Detection testing (subprocess only).""" - setup_test_mocks( - 
pytester, subprocess_mode=True, efd_enabled=True, known_tests_enabled=True, known_tests=known_tests or set() - ) - - -def setup_itr_subprocess_mocks( - pytester: Pytester, skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None -) -> None: - """Set up mocks for Intelligent Test Runner testing (subprocess only).""" - setup_test_mocks(pytester, subprocess_mode=True, skipping_enabled=True, skippable_items=skippable_items or set()) From 4dc79daaad385afc0d15c2c478b02d46412560d5 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 11:05:51 +0200 Subject: [PATCH 10/20] refactor --- tests/fixtures.py | 184 +++++++++++++++ tests/mock_setup.py | 140 +++++++++--- tests/mocks.py | 3 +- tests/subprocess_mocks.py | 469 -------------------------------------- tests/test_integration.py | 92 +++++--- 5 files changed, 355 insertions(+), 533 deletions(-) create mode 100644 tests/fixtures.py delete mode 100644 tests/subprocess_mocks.py diff --git a/tests/fixtures.py b/tests/fixtures.py new file mode 100644 index 0000000..ea5c110 --- /dev/null +++ b/tests/fixtures.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +"""Simple test fixtures for integration tests. + +This module provides a simplified approach to test configuration using plain +Python objects instead of complex serialization/deserialization. +""" + +from contextlib import contextmanager +from dataclasses import asdict +import json +import os +import typing as t + +from _pytest.pytester import Pytester + +# from ddtestopt.internal.test_data import SuiteRef +# from ddtestopt.internal.test_data import TestRef +from ddtestopt.internal.utils import asbool +from tests.mock_setup import MockFixture +from tests.mock_setup import setup_mocks_for_in_process + + +# def test_ref_to_nodeid(test_ref: TestRef) -> str: +# """Convert TestRef to pytest nodeid string.""" +# return f"{test_ref.suite.name}::{test_ref.name}" + + +# def suite_ref_to_nodeid(suite_ref: SuiteRef) -> str: +# """Convert SuiteRef to pytest nodeid string.""" +# return suite_ref.name + + +def create_fixture_with_nodeids( + skipping_enabled: bool = False, + auto_retries_enabled: bool = False, + efd_enabled: bool = False, + test_management_enabled: bool = False, + known_tests_enabled: bool = False, + skippable_items: t.Optional[t.List[str]] = None, + known_tests: t.Optional[t.List[str]] = None, + env_vars: t.Optional[t.Dict[str, str]] = None, +) -> MockFixture: + """Create a MockFixture directly with pytest nodeids (much simpler API). + + Examples: + - skippable_items=["test_file.py::test_name", "other_file.py"] + - known_tests=["test_file.py::test_function"] + """ + return MockFixture( + skipping_enabled=skipping_enabled, + auto_retries_enabled=auto_retries_enabled, + efd_enabled=efd_enabled, + test_management_enabled=test_management_enabled, + known_tests_enabled=known_tests_enabled, + skippable_items=skippable_items or [], + known_tests=known_tests or [], + env_vars=env_vars or {}, + ) + + +def get_subprocess_test_mode() -> bool: + """Get the test execution mode from environment variable. + + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=1 to force subprocess execution. + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=0 to force in-process execution. + """ + return asbool(os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE", "0")) + + +@contextmanager +def setup_test_mode_with_fixture( + pytester: Pytester, + fixture: MockFixture, + subprocess_mode: t.Optional[bool] = None, +) -> t.Generator[None, None, None]: + """Set up test environment with the given fixture. 
+ + This is the main entry point that handles both subprocess and in-process modes. + """ + if subprocess_mode is None: + subprocess_mode = get_subprocess_test_mode() + + if subprocess_mode: + # Subprocess mode: create fixture file and static conftest.py + with _setup_subprocess_mode(pytester, fixture): + yield + else: + # In-process mode: use context manager + with _setup_in_process_mode(fixture): + yield + + +@contextmanager +def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Generator[None, None, None]: + """Set up subprocess mode with fixture file.""" + # Create fixture file in test directory + fixture_path = pytester.makefile(".json", fixture=json.dumps(asdict(fixture))) + + # Set environment variable to point to fixture file + pytester._monkeypatch.setenv("DDTESTOPT_FIXTURE_PATH", str(fixture_path)) + + # Set standard test environment variables + pytester._monkeypatch.setenv("DD_API_KEY", "test-api-key") + pytester._monkeypatch.setenv("DD_SERVICE", "test-service") + pytester._monkeypatch.setenv("DD_ENV", "test-env") + + if fixture.env_vars: + # Set additional environment variables from fixture + for key, value in fixture.env_vars.items(): + pytester._monkeypatch.setenv(key, value) + + # Create static conftest.py (will be created later) + _create_static_conftest(pytester) + + yield + + +@contextmanager +def _setup_in_process_mode(fixture: MockFixture) -> t.Generator[None, None, None]: + """Set up in-process mode using mock_setup module.""" + + # Use the fixture directly with the simplified mock_setup + with setup_mocks_for_in_process(fixture): + yield + + +def _create_static_conftest(pytester: Pytester) -> None: + """Create static conftest.py that reads fixture files.""" + conftest_content = '''#!/usr/bin/env python3 +"""Auto-generated conftest.py for fixture-based mocking.""" + +import json +import os +import sys +from pathlib import Path + +# Add parent directory to path for imports +test_dir = Path(__file__).parent.parent +if str(test_dir) not in sys.path: + sys.path.insert(0, str(test_dir)) + +from tests.mock_setup import MockFixture, setup_mocks_for_subprocess + + +def _setup_mocks_from_fixture(): + """Set up mocks by reading fixture file.""" + fixture_path = os.getenv('DDTESTOPT_FIXTURE_PATH') + if not fixture_path: + return + + # Read fixture file and create fixture object + with open(fixture_path, 'r') as f: + fixture_data = json.load(f) + + fixture = MockFixture(**fixture_data) + + # Set up mocks using the simplified interface + setup_mocks_for_subprocess(fixture) + + +# Set up mocks as early as possible +_setup_mocks_from_fixture() +''' + pytester.makeconftest(conftest_content) + + +def run_test_with_fixture( + pytester: Pytester, + pytest_args: t.List[str], + fixture: MockFixture, + subprocess_mode: t.Optional[bool] = None, +) -> t.Any: + """Run a test with the given fixture configuration. + + This is the main utility function that replaces run_test_with_mocks. + """ + if subprocess_mode is None: + subprocess_mode = get_subprocess_test_mode() + + with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): + if subprocess_mode: + return pytester.runpytest_subprocess(*pytest_args) + else: + return pytester.runpytest(*pytest_args) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index c9f3d27..b39ede4 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -9,30 +9,31 @@ Now uses builders from mocks.py for consistent mock creation. 
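A condensed sketch of the intended use (illustrative; MockFixture and the
setup helpers are defined below in this module):

    fixture = MockFixture(auto_retries_enabled=True)
    with setup_mocks_for_in_process(fixture):
        ...  # run the code under test with APIClient, Git and the writers patched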
""" +from dataclasses import dataclass import typing as t from unittest.mock import Mock from unittest.mock import patch +from ddtestopt.internal.test_data import ModuleRef +from ddtestopt.internal.test_data import SuiteRef +from ddtestopt.internal.test_data import TestRef -class MockConfig: - """Configuration object for mock setup.""" +from tests.mocks import APIClientMockBuilder +from tests.mocks import BackendConnectorMockBuilder +from tests.mocks import get_mock_git_instance - def __init__(self, api_client_config: t.Dict[str, t.Any], skippable_items: t.Set[t.Any], known_tests: t.Set[t.Any]): - self.api_client_config = api_client_config - self.skippable_items = skippable_items - self.known_tests = known_tests +from contextlib import contextmanager -def create_mock_objects(config: MockConfig) -> t.Dict[str, t.Any]: - """Create all mock objects based on configuration using builders from mocks.py. +def create_mock_objects_from_fixture(fixture: t.Any) -> t.Dict[str, t.Any]: + """Create all mock objects based on MockFixture configuration using builders from mocks.py. + + Args: + fixture: MockFixture object with test configuration Returns: Dictionary containing all mock objects """ - # Import builders from mocks.py to avoid import cycles at module level - from tests.mocks import APIClientMockBuilder - from tests.mocks import BackendConnectorMockBuilder - from tests.mocks import get_mock_git_instance # Create mock git instance using existing helper mock_git_instance = get_mock_git_instance() @@ -45,19 +46,17 @@ def create_mock_objects(config: MockConfig) -> t.Dict[str, t.Any]: # Create mock backend connector using builder mock_connector = BackendConnectorMockBuilder().build() - # Create API client mock using builder with configuration + # Create API client mock using builder with fixture configuration api_builder = APIClientMockBuilder() - api_builder.with_skipping_enabled( - enabled=config.api_client_config.get("skipping_enabled", False) - ).with_auto_retries(enabled=config.api_client_config.get("auto_retries_enabled", False)).with_early_flake_detection( - enabled=config.api_client_config.get("efd_enabled", False) - ).with_test_management( - enabled=config.api_client_config.get("test_management_enabled", False) + api_builder.with_skipping_enabled(enabled=fixture.skipping_enabled).with_auto_retries( + enabled=fixture.auto_retries_enabled + ).with_early_flake_detection(enabled=fixture.efd_enabled).with_test_management( + enabled=fixture.test_management_enabled ).with_known_tests( - enabled=config.api_client_config.get("known_tests_enabled", False), tests=config.known_tests + enabled=fixture.known_tests_enabled, tests=fixture.parsed_known_tests ).with_skippable_items( - config.skippable_items + fixture.parsed_skippable_items ) mock_api_client = api_builder.build() @@ -91,12 +90,15 @@ def create_patchers(mock_objects: t.Dict[str, t.Any]) -> t.List[t.Any]: return patchers -def setup_mocks_for_subprocess(config: MockConfig) -> None: +def setup_mocks_for_subprocess(fixture: t.Any) -> None: """Set up mocks for subprocess execution (called from conftest.py). This function starts all patches and leaves them running for the subprocess. 
+ + Args: + fixture: MockFixture object with test configuration """ - mock_objects = create_mock_objects(config) + mock_objects = create_mock_objects_from_fixture(fixture) patchers = create_patchers(mock_objects) # Start all patches for subprocess mode @@ -104,17 +106,19 @@ def setup_mocks_for_subprocess(config: MockConfig) -> None: patcher.start() -def setup_mocks_for_in_process(config: MockConfig) -> t.ContextManager[None]: +def setup_mocks_for_in_process(fixture: t.Any) -> t.ContextManager[None]: """Set up mocks for in-process execution. + Args: + fixture: MockFixture object with test configuration + Returns: Context manager that manages patch lifecycle """ - from contextlib import contextmanager @contextmanager def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: - mock_objects = create_mock_objects(config) + mock_objects = create_mock_objects_from_fixture(fixture) patchers = create_patchers(mock_objects) # Start all patches @@ -129,3 +133,87 @@ def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: patcher.stop() return _mock_context() + + +def nodeid_to_test_ref(nodeid: str) -> TestRef: + """Convert pytest nodeid to TestRef object. + + Example: "test_file.py::test_name" → TestRef(...) + """ + if "::" not in nodeid: + raise ValueError(f"Invalid test nodeid (missing '::'): {nodeid}") + + file_path, test_name = nodeid.split("::", 1) + module_ref = ModuleRef(".") + suite_ref = SuiteRef(module_ref, file_path) + return TestRef(suite_ref, test_name) + + +def nodeid_to_suite_ref(nodeid: str) -> SuiteRef: + """Convert pytest nodeid to SuiteRef object. + + Example: "test_file.py" → SuiteRef(...) + """ + if "::" in nodeid: + raise ValueError(f"Cannot convert test nodeid to suite: {nodeid}") + + file_path = nodeid + module_ref = ModuleRef(".") + return SuiteRef(module_ref, file_path) + + +@dataclass +class MockFixture: + """Simple test fixture configuration using pytest nodeids. + + Uses simple strings (pytest nodeids) for much simpler JSON serialization. 
+ Examples: + - "test_file.py::test_name" for individual tests + - "test_file.py" for entire test files/suites + """ + + # API client settings + skipping_enabled: bool = False + auto_retries_enabled: bool = False + efd_enabled: bool = False + test_management_enabled: bool = False + known_tests_enabled: bool = False + + # Simple string lists - much easier to serialize/deserialize + skippable_items: t.Optional[t.List[str]] = None # pytest nodeids + known_tests: t.Optional[t.List[str]] = None # pytest nodeids + + # Environment variables for the test + env_vars: t.Optional[t.Dict[str, str]] = None + + def __post_init__(self) -> None: + """Initialize empty containers if None.""" + if self.skippable_items is None: + self.skippable_items = [] + if self.known_tests is None: + self.known_tests = [] + if self.env_vars is None: + self.env_vars = {} + + @property + def parsed_skippable_items(self) -> t.Set[t.Union[TestRef, SuiteRef]]: + """Parse skippable nodeids to TestRef/SuiteRef objects.""" + items: t.Set[t.Union[TestRef, SuiteRef]] = set() + if not self.skippable_items: + return items + + for nodeid in self.skippable_items: + if "::" in nodeid: + # It's a test reference + items.add(nodeid_to_test_ref(nodeid)) + else: + # It's a suite/file reference + items.add(nodeid_to_suite_ref(nodeid)) + return items + + @property + def parsed_known_tests(self) -> t.Set[TestRef]: + """Parse known test nodeids to TestRef objects.""" + if not self.known_tests: + return set() + return {nodeid_to_test_ref(nodeid) for nodeid in self.known_tests} diff --git a/tests/mocks.py b/tests/mocks.py index f16c0a9..fde5294 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -30,6 +30,8 @@ from ddtestopt.internal.test_data import TestSession from ddtestopt.internal.test_data import TestSuite +from contextlib import ExitStack + def get_mock_git_instance() -> Mock: mock_git_instance = Mock() @@ -528,7 +530,6 @@ def setup_standard_mocks() -> t.ContextManager[t.Any]: def network_mocks() -> t.ContextManager[t.Any]: """Create comprehensive mocks that prevent ALL network calls at multiple levels.""" - from contextlib import ExitStack def _create_stack() -> t.ContextManager[t.Any]: stack = ExitStack() diff --git a/tests/subprocess_mocks.py b/tests/subprocess_mocks.py deleted file mode 100644 index a319b51..0000000 --- a/tests/subprocess_mocks.py +++ /dev/null @@ -1,469 +0,0 @@ -#!/usr/bin/env python3 -"""Unified mocking utilities for test_integration.py. - -This module provides utilities to set up mocks for both subprocess and in-process -pytest execution modes. It supports: - -1. Subprocess mode: Serializing mock configuration to environment variables and - creating a conftest.py file that sets up mocks in the subprocess -2. In-process mode: Using traditional context managers for mocking - -The interface is designed to be mode-agnostic, allowing tests to switch between -execution modes with minimal changes. - -CONSOLIDATION NOTE: This module now uses builders from tests/mocks.py internally -via tests/mock_setup.py to eliminate code duplication and ensure consistent -mock behavior across all test modes. 
-""" - -from contextlib import contextmanager -import json -import os -import sys -import typing as t - -from _pytest.pytester import Pytester - -from ddtestopt.internal.test_data import ModuleRef -from ddtestopt.internal.test_data import SuiteRef -from ddtestopt.internal.test_data import TestRef - - -class SubprocessMockConfig: - """Configuration for mocks in subprocess environment.""" - - def __init__(self) -> None: - self.api_client_config = { - "skipping_enabled": False, - "auto_retries_enabled": False, - "efd_enabled": False, - "test_management_enabled": False, - "known_tests_enabled": False, - } - self.skippable_items: t.Set[t.Union[TestRef, SuiteRef]] = set() - self.known_tests: t.Set[TestRef] = set() - self.environment_vars: t.Dict[str, str] = {} - - def with_skipping_enabled(self, enabled: bool = True) -> "SubprocessMockConfig": - """Enable/disable test skipping.""" - self.api_client_config["skipping_enabled"] = enabled - return self - - def with_auto_retries_enabled(self, enabled: bool = True) -> "SubprocessMockConfig": - """Enable/disable auto retries.""" - self.api_client_config["auto_retries_enabled"] = enabled - return self - - def with_early_flake_detection(self, enabled: bool = True) -> "SubprocessMockConfig": - """Enable/disable early flake detection.""" - self.api_client_config["efd_enabled"] = enabled - return self - - def with_test_management(self, enabled: bool = True) -> "SubprocessMockConfig": - """Enable/disable test management.""" - self.api_client_config["test_management_enabled"] = enabled - return self - - def with_known_tests( - self, enabled: bool = True, tests: t.Optional[t.Set[TestRef]] = None - ) -> "SubprocessMockConfig": - """Configure known tests.""" - self.api_client_config["known_tests_enabled"] = enabled - if tests is not None: - self.known_tests = tests - return self - - def with_skippable_items(self, items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]]) -> "SubprocessMockConfig": - """Set skippable test items.""" - if items is not None: - self.skippable_items = items - return self - - def with_environment_vars(self, env_vars: t.Optional[t.Dict[str, str]]) -> "SubprocessMockConfig": - """Set additional environment variables.""" - if env_vars is not None: - self.environment_vars.update(env_vars) - return self - - -def _serialize_test_ref(test_ref: TestRef) -> t.Dict[str, str]: - """Serialize a TestRef to a dictionary.""" - return { - "type": "TestRef", - "module_name": test_ref.suite.module.name, - "suite_name": test_ref.suite.name, - "test_name": test_ref.name, - } - - -def _serialize_suite_ref(suite_ref: SuiteRef) -> t.Dict[str, str]: - """Serialize a SuiteRef to a dictionary.""" - return { - "type": "SuiteRef", - "module_name": suite_ref.module.name, - "suite_name": suite_ref.name, - } - - -# ## DEV: Used only inside subprocess (conftest.py) -# def _deserialize_test_ref(data: t.Dict[str, str]) -> TestRef: -# """Deserialize a TestRef from a dictionary. - -# Note: This function is duplicated in the generated conftest.py content -# for subprocess mocking. The duplication is necessary because conftest.py -# needs self-contained deserialization functions. -# """ -# module_ref = ModuleRef(data["module_name"]) -# suite_ref = SuiteRef(module_ref, data["suite_name"]) -# return TestRef(suite_ref, data["test_name"]) - - -# def _deserialize_suite_ref(data: t.Dict[str, str]) -> SuiteRef: -# """Deserialize a SuiteRef from a dictionary. - -# Note: This function is duplicated in the generated conftest.py content -# for subprocess mocking. 
The duplication is necessary because conftest.py -# needs self-contained deserialization functions. -# """ -# module_ref = ModuleRef(data["module_name"]) -# return SuiteRef(module_ref, data["suite_name"]) - - -def serialize_mock_config(config: SubprocessMockConfig) -> t.Dict[str, str]: - """Serialize mock configuration to environment variables.""" - env_vars = {} - - # Serialize API client config - env_vars["DDTESTOPT_MOCK_API_CONFIG"] = json.dumps(config.api_client_config) - - # Serialize skippable items - skippable_data = [] - for item in config.skippable_items: - if isinstance(item, TestRef): - skippable_data.append(_serialize_test_ref(item)) - elif isinstance(item, SuiteRef): - skippable_data.append(_serialize_suite_ref(item)) - env_vars["DDTESTOPT_MOCK_SKIPPABLE_ITEMS"] = json.dumps(skippable_data) - - # Serialize known tests - known_tests_data = [_serialize_test_ref(test_ref) for test_ref in config.known_tests] - env_vars["DDTESTOPT_MOCK_KNOWN_TESTS"] = json.dumps(known_tests_data) - - # Add additional environment variables - env_vars.update(config.environment_vars) - - # Add standard test environment variables - env_vars.update( - { - "DD_API_KEY": "test-api-key", - "DD_SERVICE": "test-service", - "DD_ENV": "test-env", - "DDTESTOPT_SUBPROCESS_MOCKING": "true", - } - ) - - return env_vars - - -def generate_conftest_content() -> str: - """Generate conftest.py content for subprocess mocking using importable modules.""" - return '''#!/usr/bin/env python3 -"""Auto-generated conftest.py for subprocess mocking.""" - -import json -import os -import pytest - -# Import the mock utilities we need -import sys -from pathlib import Path - -# Add the parent directory to the path so we can import our test utilities -test_dir = Path(__file__).parent.parent -if str(test_dir) not in sys.path: - sys.path.insert(0, str(test_dir)) - -from ddtestopt.internal.test_data import ModuleRef, SuiteRef, TestRef -from tests.mock_setup import MockConfig, setup_mocks_for_subprocess - - -def _deserialize_test_ref(data): - """Deserialize a TestRef from a dictionary.""" - module_ref = ModuleRef(data['module_name']) - suite_ref = SuiteRef(module_ref, data['suite_name']) - return TestRef(suite_ref, data['test_name']) - - -def _deserialize_suite_ref(data): - """Deserialize a SuiteRef from a dictionary.""" - module_ref = ModuleRef(data['module_name']) - return SuiteRef(module_ref, data['suite_name']) - - -def _setup_subprocess_mocks(): - """Set up mocks based on environment variables using importable module.""" - if not os.getenv('DDTESTOPT_SUBPROCESS_MOCKING'): - return - - # Parse API client configuration - api_config_str = os.getenv('DDTESTOPT_MOCK_API_CONFIG', '{}') - api_config = json.loads(api_config_str) - - # Parse skippable items - skippable_items_str = os.getenv('DDTESTOPT_MOCK_SKIPPABLE_ITEMS', '[]') - skippable_items_data = json.loads(skippable_items_str) - skippable_items = set() - for item_data in skippable_items_data: - if item_data['type'] == 'TestRef': - skippable_items.add(_deserialize_test_ref(item_data)) - elif item_data['type'] == 'SuiteRef': - skippable_items.add(_deserialize_suite_ref(item_data)) - - # Parse known tests - known_tests_str = os.getenv('DDTESTOPT_MOCK_KNOWN_TESTS', '[]') - known_tests_data = json.loads(known_tests_str) - known_tests = {_deserialize_test_ref(test_data) for test_data in known_tests_data} - - # Create configuration object and set up mocks using importable module - config = MockConfig(api_config, skippable_items, known_tests) - setup_mocks_for_subprocess(config) - - -# Set 
up mocks as early as possible -_setup_subprocess_mocks() - - -@pytest.fixture(autouse=True) -def ensure_mocks_are_active(): - """Ensure mocks are active for all tests.""" - # This fixture runs for every test, ensuring mocks stay active - pass -''' - - -def create_subprocess_mock_config(**kwargs: t.Any) -> SubprocessMockConfig: - """Create a SubprocessMockConfig with sensible defaults.""" - config = SubprocessMockConfig() - - # Apply any provided configuration - config.with_skipping_enabled(kwargs.get("skipping_enabled", False)).with_auto_retries_enabled( - kwargs.get("auto_retries_enabled", False) - ).with_early_flake_detection(kwargs.get("efd_enabled", False)).with_test_management( - kwargs.get("test_management_enabled", False) - ).with_known_tests( - kwargs.get("known_tests_enabled", False), kwargs.get("known_tests") - ).with_skippable_items( - kwargs.get("skippable_items", None) - ).with_environment_vars( - kwargs.get("environment_vars", None) - ) - - return config - - -def setup_subprocess_environment(pytester: Pytester, config: SubprocessMockConfig) -> None: - """Set up the subprocess environment with mocks.""" - # Serialize configuration to environment variables - env_vars = serialize_mock_config(config) - - # Set environment variables using pytester's mechanism - for key, value in env_vars.items(): - pytester._monkeypatch.setenv(key, value) - - # Create conftest.py in the test directory - pytester.makeconftest(generate_conftest_content()) - - -# ============================================================================= -# IN-PROCESS MOCKING SYSTEM (Uses shared logic) -# ============================================================================= - - -@contextmanager -def _setup_in_process_mocks(config: SubprocessMockConfig) -> t.Generator[t.Any, t.Any, t.Any]: - """Set up mocks for in-process testing using importable module.""" - from tests.mock_setup import MockConfig - from tests.mock_setup import setup_mocks_for_in_process - - # Convert SubprocessMockConfig to MockConfig - mock_config = MockConfig( - api_client_config=config.api_client_config, - skippable_items=config.skippable_items, - known_tests=config.known_tests, - ) - - # Use the importable module for mock setup - with setup_mocks_for_in_process(mock_config): - yield - - -# ============================================================================= -# UNIFIED INTERFACE - SUPPORTS BOTH MODES -# ============================================================================= - - -def as_bool(val: str) -> bool: - return val.strip().lower() in ("true", "1") - - -def get_subprocess_test_mode() -> bool: - """Get the test execution mode from environment variable or ddtrace plugin detection. - - Auto-detection logic: - 1. If _DDTESTOPT_SUBPROCESS_TEST_MODE is explicitly set, use that value - 2. If ddtrace pytest plugin is active, use subprocess mode for isolation - 3. Otherwise, default to in-process mode for speed - - Set _DDTESTOPT_SUBPROCESS_TEST_MODE=true to force subprocess execution. - Set _DDTESTOPT_SUBPROCESS_TEST_MODE=false to force in-process execution. 
- """ - # Check for explicit environment variable first - env_val = os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE") - if env_val is not None: - return as_bool(env_val) - - # Check if the ddtrace pytest plugin is imported and active - # Check if ddtrace plugin is loaded but not disabled - if "ddtrace" in sys.modules: - # The plugin module is loaded, now check if it's active - # Look at sys.argv to see if ddtrace was explicitly disabled - cmdline = " ".join(sys.argv) - pytest_addopts = os.getenv("PYTEST_ADDOPTS", "") - return "--ddtrace" in cmdline or "--ddtrace" in pytest_addopts - - # Default to in-process mode - return False - - -def setup_test_mocks( - pytester: Pytester, subprocess_mode: t.Optional[bool] = None, **config_kwargs: t.Any -) -> t.Optional[t.ContextManager[None]]: - """Unified interface for setting up test mocks. - - Args: - pytester: The pytest Pytester instance - mode: Either "subprocess" or "in-process". If None, uses DDTESTOPT_TEST_MODE env var - **config_kwargs: Configuration options for the mocks - - Returns: - None for subprocess mode, context manager for in-process mode - - Example: - # Subprocess mode (new) - setup_test_mocks(pytester, mode="subprocess") - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-v") - - # In-process mode (original) - with setup_test_mocks(pytester, mode="in-process"): - result = pytester.runpytest("-p", "ddtestopt", "-v") - - # Environment-controlled mode - # Set DDTESTOPT_TEST_MODE=in-process before running tests - context = setup_test_mocks(pytester) # Uses env var - if context: - with context: - result = pytester.runpytest("-p", "ddtestopt", "-v") - else: - result = pytester.runpytest_subprocess("-p", "ddtestopt", "-v") - """ - if subprocess_mode is None: - subprocess_mode = get_subprocess_test_mode() - - config = create_subprocess_mock_config(**config_kwargs) - - if subprocess_mode: - setup_subprocess_environment(pytester, config) - return None - - return _setup_in_process_mocks(config) - - -# # ============================================================================= -# # CONVENIENCE FUNCTIONS - SUPPORT BOTH MODES -# # ============================================================================= - - -# def setup_basic_mocks( -# pytester: Pytester, subprocess_mode: t.Optional[bool] = None -# ) -> t.Optional[t.ContextManager[None]]: -# """Set up basic mocks for simple test execution.""" -# return setup_test_mocks(pytester, subprocess_mode=subprocess_mode) - - -# def setup_retry_mocks( -# pytester: Pytester, subprocess_mode: t.Optional[bool] = None -# ) -> t.Optional[t.ContextManager[None]]: -# """Set up mocks for auto retry functionality testing.""" -# return setup_test_mocks(pytester, subprocess_mode=subprocess_mode, auto_retries_enabled=True) - - -# def setup_efd_mocks( -# pytester: Pytester, subprocess_mode: t.Optional[bool] = None, known_tests: t.Optional[t.Set[TestRef]] = None -# ) -> t.Optional[t.ContextManager[None]]: -# """Set up mocks for Early Flake Detection testing.""" -# return setup_test_mocks( -# pytester, -# subprocess_mode=subprocess_mode, -# efd_enabled=True, -# known_tests_enabled=True, -# known_tests=known_tests or set(), -# ) - - -# def setup_itr_mocks( -# pytester: Pytester, -# subprocess_mode: t.Optional[bool] = None, -# skippable_items: t.Optional[t.Set[t.Union[TestRef, SuiteRef]]] = None, -# ) -> t.Optional[t.ContextManager[None]]: -# """Set up mocks for Intelligent Test Runner testing.""" -# return setup_test_mocks( -# pytester, subprocess_mode=subprocess_mode, skipping_enabled=True, 
skippable_items=skippable_items or set() -# ) - - -# ============================================================================= -# UTILITY FUNCTIONS FOR DUAL-MODE EXECUTION -# ============================================================================= - - -def run_test_with_mocks( - pytester: Pytester, pytest_args: t.List[str], subprocess_mode: t.Optional[bool] = None, **mock_config: t.Any -) -> t.Any: - """Run a test with appropriate mocking based on the mode. - - This utility function handles the conditional execution pattern: - - For subprocess mode: sets up mocks and runs runpytest_subprocess() - - For in-process mode: uses context manager and runs runpytest() - - Args: - pytester: The pytest Pytester instance - pytest_args: Arguments to pass to pytest - mode: Test execution mode (if None, uses environment variable) - **mock_config: Mock configuration options - - Returns: - The result from pytester.runpytest() or pytester.runpytest_subprocess() - - Example: - # Simple usage - mode determined by environment - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-v"]) - - # With specific configuration - result = run_test_with_mocks( - pytester, - ["-p", "ddtestopt", "-v"], - auto_retries_enabled=True - ) - """ - if subprocess_mode is None: - subprocess_mode = get_subprocess_test_mode() - - context = setup_test_mocks(pytester, subprocess_mode=subprocess_mode, **mock_config) - - if context is not None: - # In-process mode - with context: - return pytester.runpytest(*pytest_args) - else: - # Subprocess mode - return pytester.runpytest_subprocess(*pytest_args) diff --git a/tests/test_integration.py b/tests/test_integration.py index 45ce537..93a5952 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -9,13 +9,11 @@ import pytest from ddtestopt.internal.session_manager import SessionManager -from ddtestopt.internal.test_data import ModuleRef -from ddtestopt.internal.test_data import SuiteRef -from ddtestopt.internal.test_data import TestRef from ddtestopt.internal.test_data import TestSession +from tests.fixtures import create_fixture_with_nodeids +from tests.fixtures import run_test_with_fixture from tests.mocks import mock_api_client_settings from tests.mocks import setup_standard_mocks -from tests.subprocess_mocks import run_test_with_mocks # Functions moved to tests.mocks for centralization @@ -36,8 +34,11 @@ def test_simple(): """ ) - # Run test with automatic mode detection (subprocess by default, in-process if ddtrace not loaded) - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) + # Create simple fixture with default settings + fixture = create_fixture_with_nodeids() + + # Run test with automatic mode detection + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Test should pass assert result.ret == 0 @@ -62,10 +63,11 @@ def test_passes(): # Set retry-related environment variables pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") - # Run test with automatic mode detection and retry configuration - result = run_test_with_mocks( - pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], auto_retries_enabled=True - ) + # Create fixture with auto retries enabled + fixture = create_fixture_with_nodeids(auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"}) + + # Run test with auto retries configuration + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the 
test failed after retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -109,18 +111,14 @@ def test_known_test(): """ ) - # Set up known tests - only include the "known" test - known_suite = SuiteRef(ModuleRef("."), "test_efd.py") - known_test_ref = TestRef(known_suite, "test_known_test") - - # Run test with automatic mode detection and EFD configuration - result = run_test_with_mocks( - pytester, - ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], - efd_enabled=True, - known_tests_enabled=True, - known_tests={known_test_ref}, - ) + # Define the known test for this test scenario using simple nodeid + known_test_nodeid = "test_efd.py::test_known_test" + + # Create fixture with EFD enabled and known tests + fixture = create_fixture_with_nodeids(efd_enabled=True, known_tests_enabled=True, known_tests=[known_test_nodeid]) + + # Run test with EFD configuration + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the test failed after EFD retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -163,17 +161,14 @@ def test_should_run(): """ ) - # Set up skippable tests - mark one test as skippable - skippable_suite = SuiteRef(ModuleRef("."), "test_itr.py") - skippable_test_ref = TestRef(skippable_suite, "test_should_be_skipped") + # Define the skippable test for this test scenario using simple nodeid + skippable_test_nodeid = "test_itr.py::test_should_be_skipped" + + # Create fixture with skipping enabled + fixture = create_fixture_with_nodeids(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) - # Run test with automatic mode detection and ITR configuration - result = run_test_with_mocks( - pytester, - ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], - skipping_enabled=True, - skippable_items={skippable_test_ref}, - ) + # Run test with ITR configuration + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that tests completed successfully assert result.ret == 0 # Exit code 0 indicates success @@ -214,8 +209,11 @@ def test_with_assertion(): """ ) + # Create simple fixture with default settings + fixture = create_fixture_with_nodeids() + # Run test with automatic mode detection - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that tests ran successfully assert result.ret == 0 @@ -237,8 +235,11 @@ def test_passing(): """ ) + # Create simple fixture with default settings + fixture = create_fixture_with_nodeids() + # Run test with automatic mode detection - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that one test failed and one passed assert result.ret == 1 # pytest exits with 1 when tests fail @@ -256,8 +257,11 @@ def test_plugin_loaded(): """ ) + # Create simple fixture with default settings + fixture = create_fixture_with_nodeids() + # Run test with automatic mode detection - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v"]) + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v"], fixture) # Should run without plugin loading errors assert result.ret == 0 @@ -279,8 +283,13 @@ def test_command_extraction(): """ ) + # Create simple fixture 
with default settings + fixture = create_fixture_with_nodeids() + # Run test with automatic mode detection - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v"]) + result = run_test_with_fixture( + pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v"], fixture + ) assert result.ret == 0 result.assert_outcomes(passed=1) @@ -309,8 +318,17 @@ def test_simple_pass(): pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") pytester._monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5") + # Create fixture with environment variables + fixture = create_fixture_with_nodeids( + env_vars={ + "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED": "true", + "DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2", + "DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT": "5", + } + ) + # Run test with automatic mode detection - result = run_test_with_mocks(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"]) + result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Tests should pass assert result.ret == 0 From c50aba591ef9e79342effed21d97053776ac53bf Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 11:13:43 +0200 Subject: [PATCH 11/20] cleanup --- pyproject.toml | 4 ++-- tests/fixtures.py | 13 ------------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9f28fb5..23cb2ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,8 +49,8 @@ matrix.pytest.dependencies = [ [tool.hatch.envs.hatch-test.scripts] run = "pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" -run-cov = "python -m slipcover --source ddtestopt/internal --pretty-print -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt -p no:ddtrace {args:tests}" -cov-html = "python -m slipcover --source ddtestopt --html --out slipcover.html -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt -p no:ddtrace {args:tests}" +run-cov = "python -m slipcover --source ddtestopt/internal --pretty-print -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" +cov-html = "python -m slipcover --source ddtestopt --html --out slipcover.html -m pytest{env:$HATCH_TEST_ARGS:} -p no:ddtestopt {args:tests}" cov-combine = "true" cov-report = "true" diff --git a/tests/fixtures.py b/tests/fixtures.py index ea5c110..57780f7 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -13,23 +13,11 @@ from _pytest.pytester import Pytester -# from ddtestopt.internal.test_data import SuiteRef -# from ddtestopt.internal.test_data import TestRef from ddtestopt.internal.utils import asbool from tests.mock_setup import MockFixture from tests.mock_setup import setup_mocks_for_in_process -# def test_ref_to_nodeid(test_ref: TestRef) -> str: -# """Convert TestRef to pytest nodeid string.""" -# return f"{test_ref.suite.name}::{test_ref.name}" - - -# def suite_ref_to_nodeid(suite_ref: SuiteRef) -> str: -# """Convert SuiteRef to pytest nodeid string.""" -# return suite_ref.name - - def create_fixture_with_nodeids( skipping_enabled: bool = False, auto_retries_enabled: bool = False, @@ -118,7 +106,6 @@ def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Genera @contextmanager def _setup_in_process_mode(fixture: MockFixture) -> t.Generator[None, None, None]: """Set up in-process mode using mock_setup module.""" - # Use the fixture directly with the simplified mock_setup with setup_mocks_for_in_process(fixture): yield From ac7b4cd86af6e8ae509e3910257de93d2cf8cb24 Mon Sep 17 00:00:00 2001 
From: Federico Mon Date: Fri, 26 Sep 2025 11:20:09 +0200 Subject: [PATCH 12/20] cleanup --- tests/fixtures.py | 2 +- tests/mock_setup.py | 21 ++++++------ tests/mocks.py | 3 +- tests/test_integration.py | 68 +++++++-------------------------------- 4 files changed, 22 insertions(+), 72 deletions(-) diff --git a/tests/fixtures.py b/tests/fixtures.py index 57780f7..3b1d635 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -140,7 +140,7 @@ def _setup_mocks_from_fixture(): fixture_data = json.load(f) fixture = MockFixture(**fixture_data) - + # Set up mocks using the simplified interface setup_mocks_for_subprocess(fixture) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index b39ede4..3b08e7a 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -9,6 +9,7 @@ Now uses builders from mocks.py for consistent mock creation. """ +from contextlib import contextmanager from dataclasses import dataclass import typing as t from unittest.mock import Mock @@ -17,13 +18,10 @@ from ddtestopt.internal.test_data import ModuleRef from ddtestopt.internal.test_data import SuiteRef from ddtestopt.internal.test_data import TestRef - from tests.mocks import APIClientMockBuilder from tests.mocks import BackendConnectorMockBuilder from tests.mocks import get_mock_git_instance -from contextlib import contextmanager - def create_mock_objects_from_fixture(fixture: t.Any) -> t.Dict[str, t.Any]: """Create all mock objects based on MockFixture configuration using builders from mocks.py. @@ -34,7 +32,6 @@ def create_mock_objects_from_fixture(fixture: t.Any) -> t.Dict[str, t.Any]: Returns: Dictionary containing all mock objects """ - # Create mock git instance using existing helper mock_git_instance = get_mock_git_instance() @@ -137,12 +134,12 @@ def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: def nodeid_to_test_ref(nodeid: str) -> TestRef: """Convert pytest nodeid to TestRef object. - + Example: "test_file.py::test_name" → TestRef(...) """ if "::" not in nodeid: raise ValueError(f"Invalid test nodeid (missing '::'): {nodeid}") - + file_path, test_name = nodeid.split("::", 1) module_ref = ModuleRef(".") suite_ref = SuiteRef(module_ref, file_path) @@ -151,12 +148,12 @@ def nodeid_to_test_ref(nodeid: str) -> TestRef: def nodeid_to_suite_ref(nodeid: str) -> SuiteRef: """Convert pytest nodeid to SuiteRef object. - + Example: "test_file.py" → SuiteRef(...) """ if "::" in nodeid: raise ValueError(f"Cannot convert test nodeid to suite: {nodeid}") - + file_path = nodeid module_ref = ModuleRef(".") return SuiteRef(module_ref, file_path) @@ -165,7 +162,7 @@ def nodeid_to_suite_ref(nodeid: str) -> SuiteRef: @dataclass class MockFixture: """Simple test fixture configuration using pytest nodeids. - + Uses simple strings (pytest nodeids) for much simpler JSON serialization. 
Examples: - "test_file.py::test_name" for individual tests @@ -180,8 +177,8 @@ class MockFixture: known_tests_enabled: bool = False # Simple string lists - much easier to serialize/deserialize - skippable_items: t.Optional[t.List[str]] = None # pytest nodeids - known_tests: t.Optional[t.List[str]] = None # pytest nodeids + skippable_items: t.Optional[t.List[str]] = None # pytest nodeids + known_tests: t.Optional[t.List[str]] = None # pytest nodeids # Environment variables for the test env_vars: t.Optional[t.Dict[str, str]] = None @@ -204,7 +201,7 @@ def parsed_skippable_items(self) -> t.Set[t.Union[TestRef, SuiteRef]]: for nodeid in self.skippable_items: if "::" in nodeid: - # It's a test reference + # It's a test reference items.add(nodeid_to_test_ref(nodeid)) else: # It's a suite/file reference diff --git a/tests/mocks.py b/tests/mocks.py index fde5294..46c7096 100644 --- a/tests/mocks.py +++ b/tests/mocks.py @@ -9,6 +9,7 @@ - Utility functions for common patterns """ +from contextlib import ExitStack import os from pathlib import Path import typing as t @@ -30,8 +31,6 @@ from ddtestopt.internal.test_data import TestSession from ddtestopt.internal.test_data import TestSuite -from contextlib import ExitStack - def get_mock_git_instance() -> Mock: mock_git_instance = Mock() diff --git a/tests/test_integration.py b/tests/test_integration.py index 93a5952..e397fed 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -4,7 +4,6 @@ from unittest.mock import Mock from unittest.mock import patch -from _pytest.monkeypatch import MonkeyPatch from _pytest.pytester import Pytester import pytest @@ -45,7 +44,7 @@ def test_simple(): result.assert_outcomes(passed=1) @pytest.mark.slow - def test_retry_functionality_with_pytester(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None: + def test_retry_functionality_with_pytester(self, pytester: Pytester) -> None: """Test that failing tests are retried when auto retry is enabled.""" # Create a test file with a failing test pytester.makepyfile( @@ -64,7 +63,9 @@ def test_passes(): pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") # Create fixture with auto retries enabled - fixture = create_fixture_with_nodeids(auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"}) + fixture = create_fixture_with_nodeids( + auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"} + ) # Run test with auto retries configuration result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) @@ -85,7 +86,7 @@ def test_passes(): # Verify that retries happened - should see "RETRY FAILED (Auto Test Retries)" messages # DEV: We configured DD_CIVISIBILITY_FLAKY_RETRY_COUNT=2 # BUT the plugin will show 3 retry attempts (as it includes the initial attempt) - retry_messages = output.count("RETRY FAILED (Auto Test Retries)") + retry_messages = output.count("test_always_fails RETRY FAILED (Auto Test Retries)") assert retry_messages == 3, f"Expected 3 retry messages, got {retry_messages}" # Should see the final summary mentioning dd_retry @@ -113,9 +114,11 @@ def test_known_test(): # Define the known test for this test scenario using simple nodeid known_test_nodeid = "test_efd.py::test_known_test" - + # Create fixture with EFD enabled and known tests - fixture = create_fixture_with_nodeids(efd_enabled=True, known_tests_enabled=True, known_tests=[known_test_nodeid]) + fixture = create_fixture_with_nodeids( + efd_enabled=True, known_tests_enabled=True, 
known_tests=[known_test_nodeid] + ) # Run test with EFD configuration result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) @@ -163,7 +166,7 @@ def test_should_run(): # Define the skippable test for this test scenario using simple nodeid skippable_test_nodeid = "test_itr.py::test_should_be_skipped" - + # Create fixture with skipping enabled fixture = create_fixture_with_nodeids(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) @@ -246,56 +249,7 @@ def test_passing(): result.assert_outcomes(passed=1, failed=1) @pytest.mark.slow - def test_plugin_loads_correctly(self, pytester: Pytester) -> None: - """Test that the ddtestopt plugin loads without errors.""" - # Create test file using pytester - pytester.makepyfile( - """ - def test_plugin_loaded(): - '''Test to verify plugin is loaded.''' - assert True - """ - ) - - # Create simple fixture with default settings - fixture = create_fixture_with_nodeids() - - # Run test with automatic mode detection - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-v"], fixture) - - # Should run without plugin loading errors - assert result.ret == 0 - result.assert_outcomes(passed=1) - - # Should not have any error messages about plugin loading - output = result.stdout.str() - assert "Error setting up Test Optimization plugin" not in output - - @pytest.mark.slow - def test_test_session_name_extraction(self, pytester: Pytester) -> None: - """Test that the pytest session command is properly extracted.""" - # Create test file using pytester - pytester.makepyfile( - """ - def test_command_extraction(): - '''Test for command extraction functionality.''' - assert True - """ - ) - - # Create simple fixture with default settings - fixture = create_fixture_with_nodeids() - - # Run test with automatic mode detection - result = run_test_with_fixture( - pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "--tb=short", "-x", "-v"], fixture - ) - - assert result.ret == 0 - result.assert_outcomes(passed=1) - - @pytest.mark.slow - def test_retry_environment_variables_respected(self, pytester: Pytester, monkeypatch: MonkeyPatch) -> None: + def test_retry_environment_variables_respected(self, pytester: Pytester) -> None: """Test that retry environment variables are properly read by the plugin.""" # Create test file using pytester pytester.makepyfile( From 17ee6e8e6fe6b9259cce8ec9d615626f27e63483 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 15:57:43 +0200 Subject: [PATCH 13/20] simplification --- tests/mock_setup.py | 39 ++++++++++----------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index 3b08e7a..57c8e5d 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -23,14 +23,14 @@ from tests.mocks import get_mock_git_instance -def create_mock_objects_from_fixture(fixture: t.Any) -> t.Dict[str, t.Any]: - """Create all mock objects based on MockFixture configuration using builders from mocks.py. +def create_patchers(fixture: t.Any) -> t.List[t.Any]: + """Create all patch objects. 
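+    A rough sketch of driving the patchers directly (illustrative):
+
+        patchers = create_patchers(MockFixture(skipping_enabled=True))
+        for patcher in patchers:
+            patcher.start()
+        try:
+            ...  # exercise the plugin with APIClient, Git and writers patched
+        finally:
+            for patcher in patchers:
+                patcher.stop()
+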
Args: fixture: MockFixture object with test configuration Returns: - Dictionary containing all mock objects + List of patcher objects """ # Create mock git instance using existing helper mock_git_instance = get_mock_git_instance() @@ -58,31 +58,14 @@ def create_mock_objects_from_fixture(fixture: t.Any) -> t.Dict[str, t.Any]: mock_api_client = api_builder.build() - return { - "mock_git_instance": mock_git_instance, - "mock_writer": mock_writer, - "mock_connector": mock_connector, - "mock_api_client": mock_api_client, - } - - -def create_patchers(mock_objects: t.Dict[str, t.Any]) -> t.List[t.Any]: - """Create all patch objects. - - Args: - mock_objects: Dictionary of mock objects from create_mock_objects() - - Returns: - List of patcher objects - """ patchers = [ - patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_objects["mock_api_client"]), + patch("ddtestopt.internal.session_manager.APIClient", return_value=mock_api_client), patch("ddtestopt.internal.session_manager.get_git_tags", return_value={}), patch("ddtestopt.internal.session_manager.get_platform_tags", return_value={}), - patch("ddtestopt.internal.session_manager.Git", return_value=mock_objects["mock_git_instance"]), - patch("ddtestopt.internal.http.BackendConnector", return_value=mock_objects["mock_connector"]), - patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_objects["mock_writer"]), - patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_objects["mock_writer"]), + patch("ddtestopt.internal.session_manager.Git", return_value=mock_git_instance), + patch("ddtestopt.internal.http.BackendConnector", return_value=mock_connector), + patch("ddtestopt.internal.writer.TestOptWriter", return_value=mock_writer), + patch("ddtestopt.internal.writer.TestCoverageWriter", return_value=mock_writer), ] return patchers @@ -95,8 +78,7 @@ def setup_mocks_for_subprocess(fixture: t.Any) -> None: Args: fixture: MockFixture object with test configuration """ - mock_objects = create_mock_objects_from_fixture(fixture) - patchers = create_patchers(mock_objects) + patchers = create_patchers(fixture) # Start all patches for subprocess mode for patcher in patchers: @@ -115,8 +97,7 @@ def setup_mocks_for_in_process(fixture: t.Any) -> t.ContextManager[None]: @contextmanager def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: - mock_objects = create_mock_objects_from_fixture(fixture) - patchers = create_patchers(mock_objects) + patchers = create_patchers(fixture) # Start all patches for patcher in patchers: From dd3f927f07142e66d2d6d201019d3020b2bf0c31 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:07:57 +0200 Subject: [PATCH 14/20] cleanup --- tests/fixtures.py | 34 +++------------------------------- tests/mock_setup.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/tests/fixtures.py b/tests/fixtures.py index 3b1d635..60ae601 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -74,7 +74,7 @@ def setup_test_mode_with_fixture( yield else: # In-process mode: use context manager - with _setup_in_process_mode(fixture): + with setup_mocks_for_in_process(fixture): yield @@ -103,14 +103,6 @@ def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Genera yield -@contextmanager -def _setup_in_process_mode(fixture: MockFixture) -> t.Generator[None, None, None]: - """Set up in-process mode using mock_setup module.""" - # Use the fixture directly with the simplified mock_setup - with setup_mocks_for_in_process(fixture): - 
yield - - def _create_static_conftest(pytester: Pytester) -> None: """Create static conftest.py that reads fixture files.""" conftest_content = '''#!/usr/bin/env python3 @@ -126,27 +118,10 @@ def _create_static_conftest(pytester: Pytester) -> None: if str(test_dir) not in sys.path: sys.path.insert(0, str(test_dir)) -from tests.mock_setup import MockFixture, setup_mocks_for_subprocess - - -def _setup_mocks_from_fixture(): - """Set up mocks by reading fixture file.""" - fixture_path = os.getenv('DDTESTOPT_FIXTURE_PATH') - if not fixture_path: - return - - # Read fixture file and create fixture object - with open(fixture_path, 'r') as f: - fixture_data = json.load(f) - - fixture = MockFixture(**fixture_data) - - # Set up mocks using the simplified interface - setup_mocks_for_subprocess(fixture) - +from tests.mock_setup import _setup_subprocess_mocks_from_fixture # Set up mocks as early as possible -_setup_mocks_from_fixture() +_setup_subprocess_mocks_from_fixture() ''' pytester.makeconftest(conftest_content) @@ -161,9 +136,6 @@ def run_test_with_fixture( This is the main utility function that replaces run_test_with_mocks. """ - if subprocess_mode is None: - subprocess_mode = get_subprocess_test_mode() - with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): if subprocess_mode: return pytester.runpytest_subprocess(*pytest_args) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index 57c8e5d..2fc2e41 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -11,6 +11,8 @@ from contextlib import contextmanager from dataclasses import dataclass +import json +import os import typing as t from unittest.mock import Mock from unittest.mock import patch @@ -195,3 +197,20 @@ def parsed_known_tests(self) -> t.Set[TestRef]: if not self.known_tests: return set() return {nodeid_to_test_ref(nodeid) for nodeid in self.known_tests} + + +# DEV: This is imported inside subprocess conftest +def _setup_subprocess_mocks_from_fixture() -> None: + """Set up mocks by reading fixture file.""" + fixture_path = os.getenv("DDTESTOPT_FIXTURE_PATH") + if not fixture_path: + return + + # Read fixture file and create fixture object + with open(fixture_path, "r") as f: + fixture_data = json.load(f) + + fixture = MockFixture(**fixture_data) + + # Set up mocks using the simplified interface + setup_mocks_for_subprocess(fixture) From 88aef57fc7d803b54a79e6fa5a4603238dba693a Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:12:51 +0200 Subject: [PATCH 15/20] reuse code --- tests/mock_setup.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tests/mock_setup.py b/tests/mock_setup.py index 2fc2e41..b9d3cf4 100644 --- a/tests/mock_setup.py +++ b/tests/mock_setup.py @@ -72,10 +72,10 @@ def create_patchers(fixture: t.Any) -> t.List[t.Any]: return patchers -def setup_mocks_for_subprocess(fixture: t.Any) -> None: - """Set up mocks for subprocess execution (called from conftest.py). +def start_patchers(fixture: t.Any) -> t.List[t.Any]: + """Set up mocks (called from conftest.py). - This function starts all patches and leaves them running for the subprocess. + This function starts all patches (and leaves them running for the subprocess). Args: fixture: MockFixture object with test configuration @@ -86,6 +86,8 @@ def setup_mocks_for_subprocess(fixture: t.Any) -> None: for patcher in patchers: patcher.start() + return patchers + def setup_mocks_for_in_process(fixture: t.Any) -> t.ContextManager[None]: """Set up mocks for in-process execution. 
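
# Aside -- a minimal sketch of the two patcher lifetimes handled here: a
# pytest subprocess can leave patches running (they die with the process),
# while the in-process path must stop them so mocks do not leak across tests.
# Names prefixed _demo_ are hypothetical stand-ins.
import os
from contextlib import contextmanager
from unittest.mock import patch

def _demo_patchers():
    return [patch("os.getcwd", return_value="/tmp/demo")]

@contextmanager
def _demo_patched():
    patchers = _demo_patchers()
    for p in patchers:
        p.start()              # active from here on
    try:
        yield
    finally:
        for p in patchers:
            p.stop()           # deterministic restore for in-process runs

with _demo_patched():
    assert os.getcwd() == "/tmp/demo"
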
@@ -99,11 +101,7 @@ def setup_mocks_for_in_process(fixture: t.Any) -> t.ContextManager[None]: @contextmanager def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: - patchers = create_patchers(fixture) - - # Start all patches - for patcher in patchers: - patcher.start() + patchers = start_patchers(fixture) try: yield @@ -213,4 +211,4 @@ def _setup_subprocess_mocks_from_fixture() -> None: fixture = MockFixture(**fixture_data) # Set up mocks using the simplified interface - setup_mocks_for_subprocess(fixture) + start_patchers(fixture) From f12f825bc3540b3aaae819b4fdc8d3992b2aa2d8 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:13:42 +0200 Subject: [PATCH 16/20] cleanup --- tests/fixtures.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/fixtures.py b/tests/fixtures.py index 60ae601..40334c1 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -107,9 +107,6 @@ def _create_static_conftest(pytester: Pytester) -> None: """Create static conftest.py that reads fixture files.""" conftest_content = '''#!/usr/bin/env python3 """Auto-generated conftest.py for fixture-based mocking.""" - -import json -import os import sys from pathlib import Path From 263b1643e379d1f011c593a85b4b201f6b63f3ee Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:20:03 +0200 Subject: [PATCH 17/20] rename --- tests/fixtures.py | 2 +- tests/test_integration.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/fixtures.py b/tests/fixtures.py index 40334c1..239b6cf 100644 --- a/tests/fixtures.py +++ b/tests/fixtures.py @@ -123,7 +123,7 @@ def _create_static_conftest(pytester: Pytester) -> None: pytester.makeconftest(conftest_content) -def run_test_with_fixture( +def run_pytest_with_fixture( pytester: Pytester, pytest_args: t.List[str], fixture: MockFixture, diff --git a/tests/test_integration.py b/tests/test_integration.py index e397fed..276a8f1 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -10,7 +10,7 @@ from ddtestopt.internal.session_manager import SessionManager from ddtestopt.internal.test_data import TestSession from tests.fixtures import create_fixture_with_nodeids -from tests.fixtures import run_test_with_fixture +from tests.fixtures import run_pytest_with_fixture from tests.mocks import mock_api_client_settings from tests.mocks import setup_standard_mocks @@ -37,7 +37,7 @@ def test_simple(): fixture = create_fixture_with_nodeids() # Run test with automatic mode detection - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Test should pass assert result.ret == 0 @@ -68,7 +68,7 @@ def test_passes(): ) # Run test with auto retries configuration - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the test failed after retries assert result.ret == 1 # Exit code 1 indicates test failures @@ -121,7 +121,7 @@ def test_known_test(): ) # Run test with EFD configuration - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that the test failed after EFD retries assert result.ret == 1 # Exit code 1 indicates test failures @@ 
-171,7 +171,7 @@ def test_should_run(): fixture = create_fixture_with_nodeids(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) # Run test with ITR configuration - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) # Check that tests completed successfully assert result.ret == 0 # Exit code 0 indicates success @@ -216,7 +216,7 @@ def test_with_assertion(): fixture = create_fixture_with_nodeids() # Run test with automatic mode detection - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that tests ran successfully assert result.ret == 0 @@ -242,7 +242,7 @@ def test_passing(): fixture = create_fixture_with_nodeids() # Run test with automatic mode detection - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Check that one test failed and one passed assert result.ret == 1 # pytest exits with 1 when tests fail @@ -282,7 +282,7 @@ def test_simple_pass(): ) # Run test with automatic mode detection - result = run_test_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) + result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) # Tests should pass assert result.ret == 0 From c9e4c35fea78e53d51dbfb81b63b322f8b82a1a5 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:53:38 +0200 Subject: [PATCH 18/20] join files --- tests/fixtures.py | 140 --------- ...{mock_setup.py => integration_fixtures.py} | 266 ++++++++++++------ tests/test_integration.py | 22 +- 3 files changed, 187 insertions(+), 241 deletions(-) delete mode 100644 tests/fixtures.py rename tests/{mock_setup.py => integration_fixtures.py} (64%) diff --git a/tests/fixtures.py b/tests/fixtures.py deleted file mode 100644 index 239b6cf..0000000 --- a/tests/fixtures.py +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env python3 -"""Simple test fixtures for integration tests. - -This module provides a simplified approach to test configuration using plain -Python objects instead of complex serialization/deserialization. -""" - -from contextlib import contextmanager -from dataclasses import asdict -import json -import os -import typing as t - -from _pytest.pytester import Pytester - -from ddtestopt.internal.utils import asbool -from tests.mock_setup import MockFixture -from tests.mock_setup import setup_mocks_for_in_process - - -def create_fixture_with_nodeids( - skipping_enabled: bool = False, - auto_retries_enabled: bool = False, - efd_enabled: bool = False, - test_management_enabled: bool = False, - known_tests_enabled: bool = False, - skippable_items: t.Optional[t.List[str]] = None, - known_tests: t.Optional[t.List[str]] = None, - env_vars: t.Optional[t.Dict[str, str]] = None, -) -> MockFixture: - """Create a MockFixture directly with pytest nodeids (much simpler API). 
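
# Aside -- the factory being deleted here was a thin pass-through over the
# dataclass, so call sites can construct MockFixture directly with the same
# keyword arguments. A sketch, reusing a nodeid from this test suite:
fixture = MockFixture(
    skipping_enabled=True,
    skippable_items=["test_itr.py::test_should_be_skipped"],  # pytest nodeid
)
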
- - Examples: - - skippable_items=["test_file.py::test_name", "other_file.py"] - - known_tests=["test_file.py::test_function"] - """ - return MockFixture( - skipping_enabled=skipping_enabled, - auto_retries_enabled=auto_retries_enabled, - efd_enabled=efd_enabled, - test_management_enabled=test_management_enabled, - known_tests_enabled=known_tests_enabled, - skippable_items=skippable_items or [], - known_tests=known_tests or [], - env_vars=env_vars or {}, - ) - - -def get_subprocess_test_mode() -> bool: - """Get the test execution mode from environment variable. - - Set _DDTESTOPT_SUBPROCESS_TEST_MODE=1 to force subprocess execution. - Set _DDTESTOPT_SUBPROCESS_TEST_MODE=0 to force in-process execution. - """ - return asbool(os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE", "0")) - - -@contextmanager -def setup_test_mode_with_fixture( - pytester: Pytester, - fixture: MockFixture, - subprocess_mode: t.Optional[bool] = None, -) -> t.Generator[None, None, None]: - """Set up test environment with the given fixture. - - This is the main entry point that handles both subprocess and in-process modes. - """ - if subprocess_mode is None: - subprocess_mode = get_subprocess_test_mode() - - if subprocess_mode: - # Subprocess mode: create fixture file and static conftest.py - with _setup_subprocess_mode(pytester, fixture): - yield - else: - # In-process mode: use context manager - with setup_mocks_for_in_process(fixture): - yield - - -@contextmanager -def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Generator[None, None, None]: - """Set up subprocess mode with fixture file.""" - # Create fixture file in test directory - fixture_path = pytester.makefile(".json", fixture=json.dumps(asdict(fixture))) - - # Set environment variable to point to fixture file - pytester._monkeypatch.setenv("DDTESTOPT_FIXTURE_PATH", str(fixture_path)) - - # Set standard test environment variables - pytester._monkeypatch.setenv("DD_API_KEY", "test-api-key") - pytester._monkeypatch.setenv("DD_SERVICE", "test-service") - pytester._monkeypatch.setenv("DD_ENV", "test-env") - - if fixture.env_vars: - # Set additional environment variables from fixture - for key, value in fixture.env_vars.items(): - pytester._monkeypatch.setenv(key, value) - - # Create static conftest.py (will be created later) - _create_static_conftest(pytester) - - yield - - -def _create_static_conftest(pytester: Pytester) -> None: - """Create static conftest.py that reads fixture files.""" - conftest_content = '''#!/usr/bin/env python3 -"""Auto-generated conftest.py for fixture-based mocking.""" -import sys -from pathlib import Path - -# Add parent directory to path for imports -test_dir = Path(__file__).parent.parent -if str(test_dir) not in sys.path: - sys.path.insert(0, str(test_dir)) - -from tests.mock_setup import _setup_subprocess_mocks_from_fixture - -# Set up mocks as early as possible -_setup_subprocess_mocks_from_fixture() -''' - pytester.makeconftest(conftest_content) - - -def run_pytest_with_fixture( - pytester: Pytester, - pytest_args: t.List[str], - fixture: MockFixture, - subprocess_mode: t.Optional[bool] = None, -) -> t.Any: - """Run a test with the given fixture configuration. - - This is the main utility function that replaces run_test_with_mocks. 
- """ - with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): - if subprocess_mode: - return pytester.runpytest_subprocess(*pytest_args) - else: - return pytester.runpytest(*pytest_args) diff --git a/tests/mock_setup.py b/tests/integration_fixtures.py similarity index 64% rename from tests/mock_setup.py rename to tests/integration_fixtures.py index b9d3cf4..ac021e4 100644 --- a/tests/mock_setup.py +++ b/tests/integration_fixtures.py @@ -1,15 +1,12 @@ #!/usr/bin/env python3 -"""Shared mock setup logic for both subprocess and in-process testing modes. +"""Simple test fixtures for integration tests. -This module contains the actual mock setup functions that are used by both: -1. Subprocess mode: imported by generated conftest.py -2. In-process mode: imported directly by test code - -This approach ensures coverage tracking and eliminates code duplication. -Now uses builders from mocks.py for consistent mock creation. +This module provides a simplified approach to test configuration using plain +Python objects instead of complex serialization/deserialization. """ from contextlib import contextmanager +from dataclasses import asdict from dataclasses import dataclass import json import os @@ -17,15 +14,102 @@ from unittest.mock import Mock from unittest.mock import patch +from _pytest.pytester import Pytester + from ddtestopt.internal.test_data import ModuleRef from ddtestopt.internal.test_data import SuiteRef from ddtestopt.internal.test_data import TestRef +from ddtestopt.internal.utils import asbool from tests.mocks import APIClientMockBuilder from tests.mocks import BackendConnectorMockBuilder from tests.mocks import get_mock_git_instance -def create_patchers(fixture: t.Any) -> t.List[t.Any]: +def nodeid_to_test_ref(nodeid: str) -> TestRef: + """Convert pytest nodeid to TestRef object. + + Example: "test_file.py::test_name" → TestRef(...) + """ + if "::" not in nodeid: + raise ValueError(f"Invalid test nodeid (missing '::'): {nodeid}") + + file_path, test_name = nodeid.split("::", 1) + module_ref = ModuleRef(".") + suite_ref = SuiteRef(module_ref, file_path) + return TestRef(suite_ref, test_name) + + +def nodeid_to_suite_ref(nodeid: str) -> SuiteRef: + """Convert pytest nodeid to SuiteRef object. + + Example: "test_file.py" → SuiteRef(...) + """ + if "::" in nodeid: + raise ValueError(f"Cannot convert test nodeid to suite: {nodeid}") + + file_path = nodeid + module_ref = ModuleRef(".") + return SuiteRef(module_ref, file_path) + + +@dataclass +class MockFixture: + """Simple test fixture configuration using pytest nodeids. + + Uses simple strings (pytest nodeids) for much simpler JSON serialization. 
+ Examples: + - "test_file.py::test_name" for individual tests + - "test_file.py" for entire test files/suites + """ + + # API client settings + skipping_enabled: bool = False + auto_retries_enabled: bool = False + efd_enabled: bool = False + test_management_enabled: bool = False + known_tests_enabled: bool = False + + # Simple string lists - much easier to serialize/deserialize + skippable_items: t.Optional[t.List[str]] = None # pytest nodeids + known_tests: t.Optional[t.List[str]] = None # pytest nodeids + + # Environment variables for the test + env_vars: t.Optional[t.Dict[str, str]] = None + + def __post_init__(self) -> None: + """Initialize empty containers if None.""" + if self.skippable_items is None: + self.skippable_items = [] + if self.known_tests is None: + self.known_tests = [] + if self.env_vars is None: + self.env_vars = {} + + @property + def parsed_skippable_items(self) -> t.Set[t.Union[TestRef, SuiteRef]]: + """Parse skippable nodeids to TestRef/SuiteRef objects.""" + items: t.Set[t.Union[TestRef, SuiteRef]] = set() + if not self.skippable_items: + return items + + for nodeid in self.skippable_items: + if "::" in nodeid: + # It's a test reference + items.add(nodeid_to_test_ref(nodeid)) + else: + # It's a suite/file reference + items.add(nodeid_to_suite_ref(nodeid)) + return items + + @property + def parsed_known_tests(self) -> t.Set[TestRef]: + """Parse known test nodeids to TestRef objects.""" + if not self.known_tests: + return set() + return {nodeid_to_test_ref(nodeid) for nodeid in self.known_tests} + + +def create_patchers(fixture: MockFixture) -> t.List[t.Any]: """Create all patch objects. Args: @@ -72,7 +156,7 @@ def create_patchers(fixture: t.Any) -> t.List[t.Any]: return patchers -def start_patchers(fixture: t.Any) -> t.List[t.Any]: +def start_patchers(fixture: MockFixture) -> t.List[t.Any]: """Set up mocks (called from conftest.py). This function starts all patches (and leaves them running for the subprocess). @@ -89,7 +173,7 @@ def start_patchers(fixture: t.Any) -> t.List[t.Any]: return patchers -def setup_mocks_for_in_process(fixture: t.Any) -> t.ContextManager[None]: +def setup_mocks_for_in_process(fixture: MockFixture) -> t.ContextManager[None]: """Set up mocks for in-process execution. Args: @@ -113,102 +197,108 @@ def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: return _mock_context() -def nodeid_to_test_ref(nodeid: str) -> TestRef: - """Convert pytest nodeid to TestRef object. +# DEV: This is imported inside subprocess conftest +def _setup_subprocess_mocks_from_fixture() -> None: + """Set up mocks by reading fixture file.""" + fixture_path = os.getenv("DDTESTOPT_FIXTURE_PATH") + if not fixture_path: + return - Example: "test_file.py::test_name" → TestRef(...) - """ - if "::" not in nodeid: - raise ValueError(f"Invalid test nodeid (missing '::'): {nodeid}") + # Read fixture file and create fixture object + with open(fixture_path, "r") as f: + fixture_data = json.load(f) - file_path, test_name = nodeid.split("::", 1) - module_ref = ModuleRef(".") - suite_ref = SuiteRef(module_ref, file_path) - return TestRef(suite_ref, test_name) + fixture = MockFixture(**fixture_data) + # Set up mocks using the simplified interface + start_patchers(fixture) -def nodeid_to_suite_ref(nodeid: str) -> SuiteRef: - """Convert pytest nodeid to SuiteRef object. - Example: "test_file.py" → SuiteRef(...) 
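
# Aside -- a hedged usage sketch of the two converters being moved here; the
# "::" separator is the only discriminator between the reference kinds:
test_ref = nodeid_to_test_ref("test_file.py::test_name")   # -> TestRef
suite_ref = nodeid_to_suite_ref("test_file.py")            # -> SuiteRef
# nodeid_to_test_ref("test_file.py") raises ValueError (no "::")
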
- """ - if "::" in nodeid: - raise ValueError(f"Cannot convert test nodeid to suite: {nodeid}") +@contextmanager +def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Generator[None, None, None]: + """Set up subprocess mode with fixture file.""" + conftest_content = '''#!/usr/bin/env python3 +"""Auto-generated conftest.py for fixture-based mocking.""" +import sys +from pathlib import Path - file_path = nodeid - module_ref = ModuleRef(".") - return SuiteRef(module_ref, file_path) +# Add parent directory to path for imports +test_dir = Path(__file__).parent.parent +if str(test_dir) not in sys.path: + sys.path.insert(0, str(test_dir)) +from tests.integration_fixtures import _setup_subprocess_mocks_from_fixture -@dataclass -class MockFixture: - """Simple test fixture configuration using pytest nodeids. +# Set up mocks as early as possible +_setup_subprocess_mocks_from_fixture() +''' - Uses simple strings (pytest nodeids) for much simpler JSON serialization. - Examples: - - "test_file.py::test_name" for individual tests - - "test_file.py" for entire test files/suites - """ + # Create fixture file in test directory + fixture_path = pytester.makefile(".json", fixture=json.dumps(asdict(fixture))) - # API client settings - skipping_enabled: bool = False - auto_retries_enabled: bool = False - efd_enabled: bool = False - test_management_enabled: bool = False - known_tests_enabled: bool = False + # Set environment variable to point to fixture file + pytester._monkeypatch.setenv("DDTESTOPT_FIXTURE_PATH", str(fixture_path)) - # Simple string lists - much easier to serialize/deserialize - skippable_items: t.Optional[t.List[str]] = None # pytest nodeids - known_tests: t.Optional[t.List[str]] = None # pytest nodeids + # Set standard test environment variables + pytester._monkeypatch.setenv("DD_API_KEY", "test-api-key") + pytester._monkeypatch.setenv("DD_SERVICE", "test-service") + pytester._monkeypatch.setenv("DD_ENV", "test-env") - # Environment variables for the test - env_vars: t.Optional[t.Dict[str, str]] = None + if fixture.env_vars: + # Set additional environment variables from fixture + for key, value in fixture.env_vars.items(): + pytester._monkeypatch.setenv(key, value) - def __post_init__(self) -> None: - """Initialize empty containers if None.""" - if self.skippable_items is None: - self.skippable_items = [] - if self.known_tests is None: - self.known_tests = [] - if self.env_vars is None: - self.env_vars = {} + # Create static conftest.py + pytester.makeconftest(conftest_content) - @property - def parsed_skippable_items(self) -> t.Set[t.Union[TestRef, SuiteRef]]: - """Parse skippable nodeids to TestRef/SuiteRef objects.""" - items: t.Set[t.Union[TestRef, SuiteRef]] = set() - if not self.skippable_items: - return items + yield - for nodeid in self.skippable_items: - if "::" in nodeid: - # It's a test reference - items.add(nodeid_to_test_ref(nodeid)) - else: - # It's a suite/file reference - items.add(nodeid_to_suite_ref(nodeid)) - return items - @property - def parsed_known_tests(self) -> t.Set[TestRef]: - """Parse known test nodeids to TestRef objects.""" - if not self.known_tests: - return set() - return {nodeid_to_test_ref(nodeid) for nodeid in self.known_tests} +def get_subprocess_test_mode() -> bool: + """Get the test execution mode from environment variable. + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=1 to force subprocess execution. + Set _DDTESTOPT_SUBPROCESS_TEST_MODE=0 to force in-process execution. 
+ """ + return asbool(os.getenv("_DDTESTOPT_SUBPROCESS_TEST_MODE", "0")) -# DEV: This is imported inside subprocess conftest -def _setup_subprocess_mocks_from_fixture() -> None: - """Set up mocks by reading fixture file.""" - fixture_path = os.getenv("DDTESTOPT_FIXTURE_PATH") - if not fixture_path: - return - # Read fixture file and create fixture object - with open(fixture_path, "r") as f: - fixture_data = json.load(f) +@contextmanager +def setup_test_mode_with_fixture( + pytester: Pytester, + fixture: MockFixture, + subprocess_mode: t.Optional[bool] = None, +) -> t.Generator[None, None, None]: + """Set up test environment with the given fixture. - fixture = MockFixture(**fixture_data) + This is the main entry point that handles both subprocess and in-process modes. + """ + if subprocess_mode is None: + subprocess_mode = get_subprocess_test_mode() - # Set up mocks using the simplified interface - start_patchers(fixture) + if subprocess_mode: + # Subprocess mode: create fixture file and static conftest.py + with _setup_subprocess_mode(pytester, fixture): + yield + else: + # In-process mode: use context manager + with setup_mocks_for_in_process(fixture): + yield + + +def run_pytest_with_fixture( + pytester: Pytester, + pytest_args: t.List[str], + fixture: MockFixture, + subprocess_mode: t.Optional[bool] = None, +) -> t.Any: + """Run a test with the given fixture configuration. + + This is the main utility function that replaces run_test_with_mocks. + """ + with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): + if subprocess_mode: + return pytester.runpytest_subprocess(*pytest_args) + else: + return pytester.runpytest(*pytest_args) diff --git a/tests/test_integration.py b/tests/test_integration.py index 276a8f1..2ae67aa 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -9,8 +9,8 @@ from ddtestopt.internal.session_manager import SessionManager from ddtestopt.internal.test_data import TestSession -from tests.fixtures import create_fixture_with_nodeids -from tests.fixtures import run_pytest_with_fixture +from tests.integration_fixtures import MockFixture +from tests.integration_fixtures import run_pytest_with_fixture from tests.mocks import mock_api_client_settings from tests.mocks import setup_standard_mocks @@ -34,7 +34,7 @@ def test_simple(): ) # Create simple fixture with default settings - fixture = create_fixture_with_nodeids() + fixture = MockFixture() # Run test with automatic mode detection result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) @@ -63,9 +63,7 @@ def test_passes(): pytester._monkeypatch.setenv("DD_CIVISIBILITY_FLAKY_RETRY_COUNT", "2") # Create fixture with auto retries enabled - fixture = create_fixture_with_nodeids( - auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"} - ) + fixture = MockFixture(auto_retries_enabled=True, env_vars={"DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2"}) # Run test with auto retries configuration result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) @@ -116,9 +114,7 @@ def test_known_test(): known_test_nodeid = "test_efd.py::test_known_test" # Create fixture with EFD enabled and known tests - fixture = create_fixture_with_nodeids( - efd_enabled=True, known_tests_enabled=True, known_tests=[known_test_nodeid] - ) + fixture = MockFixture(efd_enabled=True, known_tests_enabled=True, known_tests=[known_test_nodeid]) # Run test with EFD configuration result = run_pytest_with_fixture(pytester, 
["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) @@ -168,7 +164,7 @@ def test_should_run(): skippable_test_nodeid = "test_itr.py::test_should_be_skipped" # Create fixture with skipping enabled - fixture = create_fixture_with_nodeids(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) + fixture = MockFixture(skipping_enabled=True, skippable_items=[skippable_test_nodeid]) # Run test with ITR configuration result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v", "-s"], fixture) @@ -213,7 +209,7 @@ def test_with_assertion(): ) # Create simple fixture with default settings - fixture = create_fixture_with_nodeids() + fixture = MockFixture() # Run test with automatic mode detection result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) @@ -239,7 +235,7 @@ def test_passing(): ) # Create simple fixture with default settings - fixture = create_fixture_with_nodeids() + fixture = MockFixture() # Run test with automatic mode detection result = run_pytest_with_fixture(pytester, ["-p", "ddtestopt", "-p", "no:ddtrace", "-v"], fixture) @@ -273,7 +269,7 @@ def test_simple_pass(): pytester._monkeypatch.setenv("DD_CIVISIBILITY_TOTAL_FLAKY_RETRY_COUNT", "5") # Create fixture with environment variables - fixture = create_fixture_with_nodeids( + fixture = MockFixture( env_vars={ "DD_CIVISIBILITY_FLAKY_RETRY_ENABLED": "true", "DD_CIVISIBILITY_FLAKY_RETRY_COUNT": "2", From 32a66f4f88eab8a4c222a0b115d5eb6eace67c17 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 16:59:09 +0200 Subject: [PATCH 19/20] re-do comments --- tests/integration_fixtures.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/integration_fixtures.py b/tests/integration_fixtures.py index ac021e4..460a6c8 100644 --- a/tests/integration_fixtures.py +++ b/tests/integration_fixtures.py @@ -1,8 +1,7 @@ #!/usr/bin/env python3 -"""Simple test fixtures for integration tests. +"""Test fixtures for integration tests. -This module provides a simplified approach to test configuration using plain -Python objects instead of complex serialization/deserialization. +This module provides an approach to prepare fixtures and run pytester in in-process or subprocess modes. """ from contextlib import contextmanager @@ -293,10 +292,7 @@ def run_pytest_with_fixture( fixture: MockFixture, subprocess_mode: t.Optional[bool] = None, ) -> t.Any: - """Run a test with the given fixture configuration. - - This is the main utility function that replaces run_test_with_mocks. 
- """ + """Run a test with the given fixture configuration.""" with setup_test_mode_with_fixture(pytester, fixture, subprocess_mode): if subprocess_mode: return pytester.runpytest_subprocess(*pytest_args) From 1bc74dba5c3333a8bfcf737e21cba31100ff9954 Mon Sep 17 00:00:00 2001 From: Federico Mon Date: Fri, 26 Sep 2025 17:14:03 +0200 Subject: [PATCH 20/20] pass fixture as json in env var --- tests/integration_fixtures.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/tests/integration_fixtures.py b/tests/integration_fixtures.py index 460a6c8..c97feef 100644 --- a/tests/integration_fixtures.py +++ b/tests/integration_fixtures.py @@ -198,14 +198,13 @@ def _mock_context() -> t.Generator[t.Any, t.Any, t.Any]: # DEV: This is imported inside subprocess conftest def _setup_subprocess_mocks_from_fixture() -> None: - """Set up mocks by reading fixture file.""" - fixture_path = os.getenv("DDTESTOPT_FIXTURE_PATH") - if not fixture_path: + """Set up mocks by reading fixture JSON from environment variable.""" + fixture_json = os.getenv("_DDTESTOPT_FIXTURE_JSON") + if not fixture_json: return - # Read fixture file and create fixture object - with open(fixture_path, "r") as f: - fixture_data = json.load(f) + # Parse JSON directly from environment variable + fixture_data = json.loads(fixture_json) fixture = MockFixture(**fixture_data) @@ -215,7 +214,7 @@ def _setup_subprocess_mocks_from_fixture() -> None: @contextmanager def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Generator[None, None, None]: - """Set up subprocess mode with fixture file.""" + """Set up subprocess mode with fixture JSON in environment variable.""" conftest_content = '''#!/usr/bin/env python3 """Auto-generated conftest.py for fixture-based mocking.""" import sys @@ -232,11 +231,8 @@ def _setup_subprocess_mode(pytester: Pytester, fixture: MockFixture) -> t.Genera _setup_subprocess_mocks_from_fixture() ''' - # Create fixture file in test directory - fixture_path = pytester.makefile(".json", fixture=json.dumps(asdict(fixture))) - - # Set environment variable to point to fixture file - pytester._monkeypatch.setenv("DDTESTOPT_FIXTURE_PATH", str(fixture_path)) + # Set fixture JSON directly in environment variable + pytester._monkeypatch.setenv("_DDTESTOPT_FIXTURE_JSON", json.dumps(asdict(fixture))) # Set standard test environment variables pytester._monkeypatch.setenv("DD_API_KEY", "test-api-key")