From 71bf4f39ad32d330d01719dc136f84b45d6d02d6 Mon Sep 17 00:00:00 2001
From: Nima Akbarzadeh
Date: Sat, 4 Oct 2025 00:01:42 +0200
Subject: [PATCH] refactor(tests): Reduce code duplication in LLM tests

Add a helper function, _call_complete_prompt(), to eliminate the repeated
complete_prompt() calls with identical parameters across all test functions.
This reduces the test code by ~24% while maintaining the same test coverage.

Changes:
- Add a _call_complete_prompt(temperature=0.0) helper function
- Replace 6 duplicate complete_prompt() calls with the helper
- Reduce the file from 104 to 79 lines (a 24% reduction)

---
 tests/sentry/llm/test_preview.py | 59 +++++++++----------------------
 1 file changed, 17 insertions(+), 42 deletions(-)

diff --git a/tests/sentry/llm/test_preview.py b/tests/sentry/llm/test_preview.py
index c6b464e035f008..e610449805d25a 100644
--- a/tests/sentry/llm/test_preview.py
+++ b/tests/sentry/llm/test_preview.py
@@ -4,6 +4,17 @@
 from sentry.llm.usecases import LLMUseCase, complete_prompt
 
 
+def _call_complete_prompt(temperature=0.0):
+    """Helper function to call complete_prompt with common test parameters."""
+    return complete_prompt(
+        usecase=LLMUseCase.EXAMPLE,
+        prompt="prompt here",
+        message="message here",
+        temperature=temperature,
+        max_output_tokens=1024,
+    )
+
+
 def test_complete_prompt(set_sentry_option) -> None:
     with (
         set_sentry_option("llm.provider.options", {"preview": {"models": ["stub-1.0"]}}),
@@ -12,13 +23,7 @@ def test_complete_prompt(set_sentry_option) -> None:
             {"example": {"provider": "preview", "options": {"model": "stub-1.0"}}},
         ),
     ):
-        res = complete_prompt(
-            usecase=LLMUseCase.EXAMPLE,
-            prompt="prompt here",
-            message="message here",
-            temperature=0.0,
-            max_output_tokens=1024,
-        )
+        res = _call_complete_prompt()
 
     assert res == ""
 
@@ -32,13 +37,7 @@ def test_invalid_usecase_config(set_sentry_option) -> None:
         ),
     ):
         with pytest.raises(InvalidUsecaseError):
-            complete_prompt(
-                usecase=LLMUseCase.EXAMPLE,
-                prompt="prompt here",
-                message="message here",
-                temperature=0.0,
-                max_output_tokens=1024,
-            )
+            _call_complete_prompt()
 
 
 def test_invalid_provider_config(set_sentry_option) -> None:
@@ -50,13 +49,7 @@ def test_invalid_provider_config(set_sentry_option) -> None:
         ),
     ):
         with pytest.raises(InvalidProviderError):
-            complete_prompt(
-                usecase=LLMUseCase.EXAMPLE,
-                prompt="prompt here",
-                message="message here",
-                temperature=0.0,
-                max_output_tokens=1024,
-            )
+            _call_complete_prompt()
 
 
 def test_invalid_model(set_sentry_option) -> None:
@@ -68,13 +61,7 @@ def test_invalid_model(set_sentry_option) -> None:
         ),
     ):
         with pytest.raises(InvalidModelError):
-            complete_prompt(
-                usecase=LLMUseCase.EXAMPLE,
-                prompt="prompt here",
-                message="message here",
-                temperature=0.0,
-                max_output_tokens=1024,
-            )
+            _call_complete_prompt()
 
 
 def test_invalid_temperature(set_sentry_option) -> None:
@@ -86,18 +73,6 @@ def test_invalid_temperature(set_sentry_option) -> None:
         ),
     ):
         with pytest.raises(ValueError):
-            complete_prompt(
-                usecase=LLMUseCase.EXAMPLE,
-                prompt="prompt here",
-                message="message here",
-                temperature=-1,
-                max_output_tokens=1024,
-            )
+            _call_complete_prompt(temperature=-1)
         with pytest.raises(ValueError):
-            complete_prompt(
-                usecase=LLMUseCase.EXAMPLE,
-                prompt="prompt here",
-                message="message here",
-                temperature=2,
-                max_output_tokens=1024,
-            )
+            _call_complete_prompt(temperature=2)
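
Note (not part of the patch): the same de-duplication could also be expressed
with functools.partial instead of a wrapper function. A minimal sketch,
assuming the same imports as tests/sentry/llm/test_preview.py:

    from functools import partial

    from sentry.llm.usecases import LLMUseCase, complete_prompt

    # Bind the shared test parameters once. partial() lets callers override
    # any bound keyword, so _call_complete_prompt(temperature=-1) still
    # reaches complete_prompt() with temperature=-1 as the tests require.
    _call_complete_prompt = partial(
        complete_prompt,
        usecase=LLMUseCase.EXAMPLE,
        prompt="prompt here",
        message="message here",
        temperature=0.0,
        max_output_tokens=1024,
    )

The explicit def chosen in the patch is arguably preferable here: it keeps a
visible signature and docstring, which reads better in tracebacks and under
type checking; the partial form is simply a more compact equivalent.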