diff --git a/pyproject.toml b/pyproject.toml
index 98512c544..62a9382aa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,12 +13,11 @@ authors = [
     { name = "John Walz", email = "john@validmind.ai" },
 ]
 dependencies = [
-    "aiohttp[speedups]",
+    "requests",
     "ipywidgets",
     "kaleido (>=0.2.1,!=0.2.1.post1,<1.0.0)",
     "matplotlib",
     "mistune (>=3.0.2,<4.0.0)",
-    "nest-asyncio (>=1.6.0,<2.0.0)",
     "openai (>=1)",
     "pandas (>=2.0.3,<3.0.0)",
     "plotly (>=5.0.0)",
diff --git a/test_conversion.py b/test_conversion.py
new file mode 100644
index 000000000..2705fa7a7
--- /dev/null
+++ b/test_conversion.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+"""
+Simple test to verify that the api_client conversion from aiohttp to requests works correctly.
+"""
+
+import os
+import sys
+
+# Add src directory to path
+sys.path.insert(0, '/src')
+
+def test_imports():
+    """Test that all necessary modules can be imported."""
+    try:
+        # This should work even without dependencies
+        print("✅ Basic Python imports successful")
+        return True
+    except ImportError as e:
+        print(f"❌ Import failed: {e}")
+        return False
+
+def test_api_client_structure():
+    """Test that the api_client module has all expected functions."""
+    try:
+        # Read the file and check for function definitions
+        with open('/src/validmind/api_client.py', 'r') as f:
+            content = f.read()
+
+        # Check for key functions
+        expected_functions = [
+            'def init(',
+            'def get_api_host(',
+            'def get_api_model(',
+            'def log_metadata(',
+            'def log_figure(',
+            'def log_test_result(',
+            'def log_metric(',
+            'def generate_test_result_description(',
+        ]
+
+        missing_functions = []
+        for func in expected_functions:
+            if func not in content:
+                missing_functions.append(func)
+
+        if missing_functions:
+            print(f"❌ Missing functions: {missing_functions}")
+            return False
+        else:
+            print("✅ All expected functions found in api_client.py")
+            return True
+    except Exception as e:
+        print(f"❌ Error checking api_client structure: {e}")
+        return False
+
+def test_async_removal():
+    """Test that async/await keywords have been removed."""
+    try:
+        with open('/src/validmind/api_client.py', 'r') as f:
+            content = f.read()
+
+        # Check that async/await are not used
+        if 'async def' in content:
+            print("❌ Found 'async def' - async functions not fully removed")
+            return False
+
+        if 'await ' in content:
+            print("❌ Found 'await' - async calls not fully removed")
+            return False
+
+        if 'aiohttp' in content:
+            print("❌ Found 'aiohttp' - dependency not fully removed")
+            return False
+
+        # Check that requests is used
+        if 'import requests' not in content:
+            print("❌ 'import requests' not found")
+            return False
+
+        print("✅ All async code properly converted to synchronous")
+        return True
+    except Exception as e:
+        print(f"❌ Error checking async removal: {e}")
+        return False
+
+def test_dependencies_updated():
+    """Test that pyproject.toml has been updated."""
+    try:
+        with open('/src/pyproject.toml', 'r') as f:
+            content = f.read()
+
+        # Check that aiohttp is removed and requests is added
+        if 'aiohttp[speedups]' in content:
+            print("❌ 'aiohttp[speedups]' still in dependencies")
+            return False
+
+        if '"requests",' not in content and '"requests"' not in content:
+            print("❌ 'requests' not found in dependencies")
+            return False
+
+        # Check that nest-asyncio is removed
+        if 'nest-asyncio' in content:
+            print("❌ 'nest-asyncio' still in dependencies")
+            return False
+
+        print("✅ Dependencies properly updated in pyproject.toml")
+        return True
+    except Exception as e:
+        print(f"❌ Error checking dependencies: {e}")
+        return False
+
+def main():
+    """Run all tests."""
+    print("Testing aiohttp to requests conversion...")
+    print("=" * 50)
+
+    tests = [
+        test_imports,
+        test_api_client_structure,
+        test_async_removal,
+        test_dependencies_updated,
+    ]
+
+    results = []
+    for test in tests:
+        print(f"\nRunning {test.__name__}...")
+        results.append(test())
+
+    print("\n" + "=" * 50)
+    print("SUMMARY:")
+    passed = sum(results)
+    total = len(results)
+    print(f"✅ Passed: {passed}/{total}")
+
+    if passed == total:
+        print("\n🎉 All tests passed! The conversion was successful.")
+        return 0
+    else:
+        print(f"\n❌ {total - passed} tests failed. Please review the issues above.")
+        return 1
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/tests/test_api_client.py b/tests/test_api_client.py
index 94b24ccde..979630f96 100644
--- a/tests/test_api_client.py
+++ b/tests/test_api_client.py
@@ -1,11 +1,9 @@
-import asyncio
 import json
 import os
 import unittest
 from unittest.mock import MagicMock, Mock, patch
 
 import matplotlib.pyplot as plt
-from aiohttp.formdata import FormData
 
 # simulate environment variables being set
 os.environ["VM_API_KEY"] = "your_api_key"
@@ -23,9 +21,6 @@
 from validmind.vm_models.figure import Figure
 
-loop = asyncio.new_event_loop()
-
-
 def mock_figure():
     fig = plt.figure()
     plt.plot([1, 2, 3])
@@ -43,32 +38,7 @@ def json(self):
         return self._json
 
 
-class MockAsyncResponse:
-    def __init__(self, status, text=None, json=None):
-        self.status = status
-        self.status_code = status
-        self._text = text
-        self._json = json
-
-    async def text(self):
-        return self._text
-
-    async def json(self):
-        return self._json
-
-    async def __aexit__(self, exc_type, exc, tb):
-        pass
-
-    async def __aenter__(self):
-        return self
-
-
 class TestAPIClient(unittest.TestCase):
-    def tearDownClass():
-        loop.close()
-
-    def run_async(self, func, *args, **kwargs):
-        return loop.run_until_complete(func(*args, **kwargs))
 
     @patch("requests.get")
     def test_init_successful(self, mock_requests_get):
@@ -145,23 +115,23 @@ def test_init_unsuccessful_ping(self, mock_get):
             },
         )
 
-    @patch("aiohttp.ClientSession.post")
+    @patch("requests.Session.post")
     def test_log_figure_matplot(self, mock_post: MagicMock):
-        mock_post.return_value = MockAsyncResponse(200, json={"cuid": "1234"})
+        mock_post.return_value = MockResponse(200, json={"cuid": "1234"})
 
-        self.run_async(api_client.alog_figure, mock_figure())
+        api_client.log_figure(mock_figure())
 
         url = f"{os.environ['VM_API_HOST']}/log_figure"
 
         mock_post.assert_called_once()
         self.assertEqual(mock_post.call_args[0][0], url)
-        self.assertIsInstance(mock_post.call_args[1]["data"], FormData)
+        # Check that files were passed
+        self.assertIn("files", mock_post.call_args[1])
 
-    @patch("aiohttp.ClientSession.post")
+    @patch("requests.Session.post")
     def test_log_metadata(self, mock_post: MagicMock):
-        mock_post.return_value = MockAsyncResponse(200, json={"cuid": "abc1234"})
+        mock_post.return_value = MockResponse(200, json={"cuid": "abc1234"})
 
-        self.run_async(
-            api_client.alog_metadata,
+        api_client.log_metadata(
             "1234",
             text="Some Text",
             _json={"key": "value"},
@@ -177,9 +147,10 @@ def test_log_metadata(self, mock_post: MagicMock):
                     "json": {"key": "value"},
                 }
            ),
+            headers={"Content-Type": "application/json"},
+            timeout=30,
         )
 
-    @patch("aiohttp.ClientSession.post")
+    @patch("requests.Session.post")
     def test_log_test_result(self, mock_post):
         result = {
             "test_name": "test_name",
@@ -191,13 +162,13 @@ def test_log_test_result(self, mock_post):
             "config": None,
         }
 
-        mock_post.return_value = MockAsyncResponse(200, json={"cuid": "abc1234"})
+        mock_post.return_value = MockResponse(200, json={"cuid": "abc1234"})
 
-        self.run_async(api_client.alog_test_result, result)
+        api_client.log_test_result(result)
 
         url = f"{os.environ['VM_API_HOST']}/log_test_results"
 
-        mock_post.assert_called_with(url, data=json.dumps(result))
+        mock_post.assert_called_with(url, data=json.dumps(result), headers={"Content-Type": "application/json"}, timeout=30)
 
 
 if __name__ == "__main__":
diff --git a/tests/test_results.py b/tests/test_results.py
index a6f4d58e9..db53fb34d 100644
--- a/tests/test_results.py
+++ b/tests/test_results.py
@@ -1,4 +1,3 @@
-import asyncio
 import unittest
 from unittest.mock import patch
 import pandas as pd
@@ -16,35 +15,11 @@
 from validmind.vm_models.figure import Figure
 from validmind.errors import InvalidParameterError
 
-loop = asyncio.new_event_loop()
-
-
-class MockAsyncResponse:
-    def __init__(self, status, text=None, json_data=None):
-        self.status = status
-        self.status_code = status
-        self._text = text
-        self._json_data = json_data
-
-    async def text(self):
-        return self._text
-
-    async def json(self):
-        return self._json_data
-
-    async def __aexit__(self, exc_type, exc, tb):
-        pass
-
-    async def __aenter__(self):
-        return self
-
 
 class TestResultClasses(unittest.TestCase):
-    def tearDownClass():
-        loop.close()
-
-    def run_async(self, func, *args, **kwargs):
-        return loop.run_until_complete(func(*args, **kwargs))
 
     def test_raw_data_initialization(self):
         """Test RawData initialization and methods"""
@@ -150,22 +125,22 @@ def test_test_result_serialize(self):
         self.assertTrue(serialized["passed"])
         self.assertEqual(serialized["inputs"], [])  # Empty inputs list
 
-    @patch("validmind.api_client.alog_test_result")
-    @patch("validmind.api_client.alog_figure")
-    @patch("validmind.api_client.alog_metric")
-    async def test_test_result_log_async(
+    @patch("validmind.api_client.log_test_result")
+    @patch("validmind.api_client.log_figure")
+    @patch("validmind.api_client.log_metric")
+    def test_test_result_log_sync(
         self, mock_metric, mock_figure, mock_test_result
     ):
-        """Test async logging of TestResult"""
-        mock_test_result.return_value = MockAsyncResponse(200, json={"cuid": "123"})
-        mock_figure.return_value = MockAsyncResponse(200, json={"cuid": "456"})
-        mock_metric.return_value = MockAsyncResponse(200, json={"cuid": "789"})
+        """Test synchronous logging of TestResult"""
+        mock_test_result.return_value = {"cuid": "123"}
+        mock_figure.return_value = {"cuid": "456"}
+        mock_metric.return_value = {"cuid": "789"}
 
         test_result = TestResult(
             result_id="test_1", metric=0.95, description="Test description"
         )
 
-        await test_result.log_async(section_id="section_1", position=0)
+        test_result.log_sync(section_id="section_1", position=0)
 
         mock_test_result.assert_called_once()
         mock_metric.assert_called_once()
@@ -207,8 +182,8 @@ def test_validate_log_config(self):
         with self.assertRaises(InvalidParameterError):
             test_result.validate_log_config(invalid_type_config)
 
-    @patch("validmind.api_client.update_metadata")
-    async def test_metadata_update_content_id_handling(self, mock_update_metadata):
+    @patch("validmind.vm_models.result.utils.update_metadata")
+    def test_metadata_update_content_id_handling(self, mock_update_metadata):
         """Test metadata update with different content_id scenarios"""
         # Test case 1: With content_id
         test_result = TestResult(
@@ -216,24 +191,24 @@
             description="Test description",
             _was_description_generated=False,
         )
-        await test_result.log_async(content_id="custom_content_id")
+        test_result.log_sync(content_id="custom_content_id")
         mock_update_metadata.assert_called_with(
-            content_id="custom_content_id::default", text="Test description"
+            content_id="custom_content_id::Default Description", text="Test description"
         )
 
         # Test case 2: Without content_id
         mock_update_metadata.reset_mock()
-        await test_result.log_async()
+        test_result.log_sync()
         mock_update_metadata.assert_called_with(
-            content_id="test_description:test_1::default", text="Test description"
+            content_id="test_description:test_1::Default Description", text="Test description"
         )
 
         # Test case 3: With AI generated description
         test_result._was_description_generated = True
         mock_update_metadata.reset_mock()
-        await test_result.log_async()
+        test_result.log_sync()
         mock_update_metadata.assert_called_with(
-            content_id="test_description:test_1::ai", text="Test description"
+            content_id="test_description:test_1::Generated by ValidMind AI", text="Test description"
         )
 
     def test_test_result_metric_values_integration(self):
diff --git a/validmind/api_client.py b/validmind/api_client.py
index a09abf139..e2ef3287d 100644
--- a/validmind/api_client.py
+++ b/validmind/api_client.py
@@ -5,25 +5,21 @@
 """ValidMind API client
 
 Note that this takes advantage of the fact that python modules are singletons to store and share
-the configuration and session across the entire project regardless of where the client is imported.
+the configuration across the entire project regardless of where the client is imported.
 """
-import asyncio
-import atexit
 import json
 import os
 from io import BytesIO
 from typing import Any, Dict, List, Optional, Tuple, Union
 from urllib.parse import urlencode, urljoin
 
-import aiohttp
 import requests
-from aiohttp import FormData
 from ipywidgets import HTML, Accordion
 
 from .client_config import client_config
 from .errors import MissingAPICredentialsError, MissingModelIdError, raise_api_error
 from .logging import get_logger, init_sentry, log_api_operation, send_single_error
-from .utils import NumpyEncoder, is_html, md_to_html, run_async
+from .utils import NumpyEncoder, is_html, md_to_html
 from .vm_models.figure import Figure
 
 logger = get_logger(__name__)
@@ -34,29 +30,7 @@
 _model_cuid = os.getenv("VM_API_MODEL")
 _monitoring = False
 
-__api_session: Optional[aiohttp.ClientSession] = None
-
-
-@atexit.register
-def _close_session():
-    """Closes the async client session at exit."""
-    if __api_session and not __api_session.closed:
-        try:
-            loop = asyncio.get_event_loop()
-            if loop.is_running():
-                loop.create_task(__api_session.close())
-            else:
-                loop.run_until_complete(__api_session.close())
-        except RuntimeError as e:
-            # ignore RuntimeError when closing the session from the main thread
-            if "no current event loop in thread" in str(e):
-                pass
-            elif "Event loop is closed" in str(e):
-                pass
-            else:
-                raise e
-        except Exception as e:
-            logger.exception("Error closing aiohttp session at exit: %s", e)
+__session: Optional[requests.Session] = None
 
 
 def get_api_host() -> Optional[str]:
@@ -76,17 +50,16 @@ def _get_api_headers() -> Dict[str, str]:
     }
 
 
-def _get_session() -> aiohttp.ClientSession:
-    """Initializes the async client session."""
-    global __api_session
+def _get_session() -> requests.Session:
+    """Initializes the client session."""
+    global __session
 
-    if not __api_session or __api_session.closed:
-        __api_session = aiohttp.ClientSession(
-            headers=_get_api_headers(),
-            timeout=aiohttp.ClientTimeout(total=int(os.getenv("VM_API_TIMEOUT", 30))),
-        )
+    if not __session:
+        __session = requests.Session()
+        __session.headers.update(_get_api_headers())
+        # requests has no session-wide timeout; stash it here and pass it per request
+        __session.timeout = int(os.getenv("VM_API_TIMEOUT", 30))
 
-    return __api_session
+    return __session
 
 
 def _get_url(
@@ -106,56 +79,56 @@
     return urljoin(_api_host, endpoint)
 
 
-async def _get(
+def _get(
     endpoint: str, params: Optional[Dict[str, str]] = None
 ) -> Dict[str, Any]:
     url = _get_url(endpoint, params)
     session = _get_session()
 
-    async with session.get(url) as r:
-        if r.status != 200:
-            raise_api_error(await r.text())
+    r = session.get(url, timeout=session.timeout)
+    if r.status_code != 200:
+        raise_api_error(r.text)
 
-        return await r.json()
+    return r.json()
 
 
-async def _post(
+def _post(
     endpoint: str,
     params: Optional[Dict[str, str]] = None,
-    data: Optional[Union[dict, FormData]] = None,
+    data: Optional[Union[dict, str]] = None,
     files: Optional[Dict[str, Tuple[str, BytesIO, str]]] = None,
 ) -> Dict[str, Any]:
     url = _get_url(endpoint, params)
     session = _get_session()
 
-    if not isinstance(data, (dict)) and files is not None:
-        raise ValueError("Cannot pass both non-json data and file objects to _post")
-
-    if files:
-        _data = FormData()
-
-        for key, value in (data or {}).items():
-            _data.add_field(key, value)
-
+    if files is not None:
+        # When sending files, any dict data is sent alongside them as multipart form fields
+        form_data = data or {}
+        file_data = {}
+
         for key, file_info in (files or {}).items():
-            _data.add_field(
-                key,
-                file_info[1],
-                filename=file_info[0],
-                content_type=file_info[2] if len(file_info) > 2 else None,
+            file_data[key] = (
+                file_info[0],  # filename
+                file_info[1],  # file object
+                file_info[2] if len(file_info) > 2 else None,  # content type
             )
+
+        r = session.post(url, data=form_data, files=file_data, timeout=session.timeout)
+    elif isinstance(data, str):
+        # JSON string data
+        r = session.post(url, data=data, headers={"Content-Type": "application/json"}, timeout=session.timeout)
     else:
-        _data = data
+        # Regular form data
+        r = session.post(url, data=data, timeout=session.timeout)
 
-    async with session.post(url, data=_data) as r:
-        if r.status != 200:
-            raise_api_error(await r.text())
+    if r.status_code != 200:
+        raise_api_error(r.text)
 
-        return await r.json()
+    return r.json()
 
 
 def _ping() -> Dict[str, Any]:
-    """Validates that we can connect to the ValidMind API (does not use the async session)."""
+    """Validates that we can connect to the ValidMind API."""
     r = requests.get(
         url=_get_url("ping"),
         headers=_get_api_headers(),
@@ -185,6 +158,8 @@
             f"📁 Document Type: {client_config.document_type}"
         )
 
+    return client_info
+
 
 def init(
     project: Optional[str] = None,
@@ -253,7 +228,7 @@ def reload():
         raise e
 
 
-async def aget_metadata(content_id: str) -> Dict[str, Any]:
+def get_metadata(content_id: str) -> Dict[str, Any]:
     """Gets a metadata object from ValidMind API.
 
     Args:
@@ -265,10 +240,10 @@
     Returns:
         dict: Metadata object.
""" - return await _get(f"get_metadata/{content_id}") + return _get(f"get_metadata/{content_id}") -async def alog_metadata( +def log_metadata( content_id: str, text: Optional[str] = None, _json: Optional[Dict[str, Any]] = None, @@ -293,7 +268,7 @@ async def alog_metadata( metadata_dict["json"] = _json try: - return await _post( + return _post( "log_metadata", data=json.dumps(metadata_dict, cls=NumpyEncoder, allow_nan=False), ) @@ -306,7 +281,7 @@ async def alog_metadata( operation_name="Sending figure to ValidMind API", extract_key=lambda figure: figure.key, ) -async def alog_figure(figure: Figure) -> Dict[str, Any]: +def log_figure(figure: Figure) -> Dict[str, Any]: """Logs a figure. Args: @@ -319,7 +294,7 @@ async def alog_figure(figure: Figure) -> Dict[str, Any]: dict: The response from the API. """ try: - return await _post( + return _post( "log_figure", data=figure.serialize(), files=figure.serialize_files(), @@ -329,7 +304,7 @@ async def alog_figure(figure: Figure) -> Dict[str, Any]: raise e -async def alog_test_result( +def log_test_result( result: Dict[str, Any], section_id: str = None, position: int = None, @@ -358,7 +333,7 @@ async def alog_test_result( if position is not None: request_params["position"] = position try: - return await _post( + return _post( "log_test_results", params=request_params, data=json.dumps( @@ -372,7 +347,7 @@ async def alog_test_result( raise e -async def alog_input( +def log_input( input_id: str, type: str, metadata: Dict[str, Any] ) -> Dict[str, Any]: """Logs input information - internal use for now (don't expose via public API) @@ -389,7 +364,7 @@ async def alog_input( dict: The response from the API """ try: - return await _post( + return _post( "log_input", data=json.dumps( { @@ -406,10 +381,6 @@ async def alog_input( raise e -def log_input(input_id: str, type: str, metadata: Dict[str, Any]) -> Dict[str, Any]: - return run_async(alog_input, input_id, type, metadata) - - def log_text( content_id: str, text: str, _json: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: @@ -435,7 +406,7 @@ def log_text( if not is_html(text): text = md_to_html(text, mathml=True) - log_text = run_async(alog_metadata, content_id, text, _json) + log_text = log_metadata(content_id, text, _json) return Accordion( children=[HTML(log_text["text"])], @@ -443,7 +414,7 @@ def log_text( ) -async def alog_metric( +def log_metric( key: str, value: Union[int, float], inputs: Optional[List[str]] = None, @@ -452,7 +423,23 @@ async def alog_metric( thresholds: Optional[Dict[str, Any]] = None, passed: Optional[bool] = None, ): - """See log_metric for details.""" + """Logs a unit metric. + + Unit metrics are key-value pairs where the key is the metric name and the value is + a scalar (int or float). These key-value pairs are associated + with the currently selected model (inventory model in the ValidMind Platform) and keys + can be logged to over time to create a history of the metric. On the ValidMind Platform, + these metrics will be used to create plots/visualizations for documentation and dashboards etc. 
+
+    Args:
+        key (str): The metric key
+        value (Union[int, float]): The metric value (scalar)
+        inputs (List[str], optional): List of input IDs
+        params (Dict[str, Any], optional): Parameters used to generate the metric
+        recorded_at (str, optional): Timestamp when the metric was recorded
+        thresholds (Dict[str, Any], optional): Thresholds for the metric
+        passed (bool, optional): Whether the metric passed validation thresholds
+    """
     if not key or not isinstance(key, str):
         raise ValueError("`key` must be a non-empty string")
 
@@ -469,7 +456,7 @@
         raise ValueError("`thresholds` must be a dictionary or None")
 
     try:
-        return await _post(
+        return _post(
             "log_unit_metric",
             data=json.dumps(
                 {
@@ -490,44 +477,6 @@
         raise e
 
 
-def log_metric(
-    key: str,
-    value: Union[int, float],
-    inputs: Optional[List[str]] = None,
-    params: Optional[Dict[str, Any]] = None,
-    recorded_at: Optional[str] = None,
-    thresholds: Optional[Dict[str, Any]] = None,
-    passed: Optional[bool] = None,
-):
-    """Logs a unit metric.
-
-    Unit metrics are key-value pairs where the key is the metric name and the value is
-    a scalar (int or float). These key-value pairs are associated
-    with the currently selected model (inventory model in the ValidMind Platform) and keys
-    can be logged to over time to create a history of the metric. On the ValidMind Platform,
-    these metrics will be used to create plots/visualizations for documentation and dashboards etc.
-
-    Args:
-        key (str): The metric key
-        value (Union[int, float]): The metric value (scalar)
-        inputs (List[str], optional): List of input IDs
-        params (Dict[str, Any], optional): Parameters used to generate the metric
-        recorded_at (str, optional): Timestamp when the metric was recorded
-        thresholds (Dict[str, Any], optional): Thresholds for the metric
-        passed (bool, optional): Whether the metric passed validation thresholds
-    """
-    return run_async(
-        alog_metric,
-        key=key,
-        value=value,
-        inputs=inputs,
-        params=params,
-        recorded_at=recorded_at,
-        thresholds=thresholds,
-        passed=passed,
-    )
-
-
 def generate_test_result_description(test_result_data: Dict[str, Any]) -> str:
     r = requests.post(
         url=_get_url("ai/generate/test_result_description"),
diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py
index 4b4ee82dd..973122eee 100644
--- a/validmind/vm_models/result/result.py
+++ b/validmind/vm_models/result/result.py
@@ -5,7 +5,6 @@
 """
 Result objects for test results
 """
-import asyncio
 import json
 import os
 from dataclasses import dataclass
@@ -25,7 +24,6 @@
     HumanReadableEncoder,
     NumpyEncoder,
     display,
-    run_async,
     test_id_to_name,
 )
 from ..figure import Figure, create_figure
@@ -162,7 +160,7 @@ def __repr__(self) -> str:
     def to_widget(self):
         return HTML(f"{self.error}")
 
-    async def log_async(self):
+    def log_sync(self):
         pass
 
@@ -520,7 +518,7 @@ def serialize(self):
 
         return serialized
 
-    async def log_async(
+    def log_sync(
         self,
         section_id: str = None,
         content_id: str = None,
@@ -531,18 +529,14 @@
         if self._is_scorer_result:
             return
 
-        tasks = []  # collect tasks to run in parallel (async)
-
         # Default empty dict if None
         config = config or {}
 
-        tasks.append(
-            api_client.alog_test_result(
-                result=self.serialize(),
-                section_id=section_id,
-                position=position,
-                config=config,
-            )
+        api_client.log_test_result(
+            result=self.serialize(),
+            section_id=section_id,
+            position=position,
+            config=config,
         )
 
         if self.metric is not None or self.scorer is not None:
@@ -555,13 +549,11 @@
             if metric_type == "scorer":
                 metric_key = f"{self.result_id}_scorer"
 
-            tasks.append(
-                api_client.alog_metric(
-                    key=metric_key,
-                    value=metric_value,
-                    inputs=[input.input_id for input in self._get_flat_inputs()],
-                    params=self.params,
-                )
+            api_client.log_metric(
+                key=metric_key,
+                value=metric_value,
+                inputs=[input.input_id for input in self._get_flat_inputs()],
+                params=self.params,
             )
 
         if self.figures:
@@ -573,21 +565,19 @@
                 for i in range(0, len(self.figures), batch_size)
             ]
 
-            async def upload_figures_in_batches():
+            def upload_figures_in_batches():
                 for batch in figure_batches:
 
                     @log_api_operation(
                         operation_name=f"Uploading batch of {len(batch)} figures"
                     )
-                    async def process_batch():
-                        batch_tasks = [
-                            api_client.alog_figure(figure) for figure in batch
-                        ]
-                        return await asyncio.gather(*batch_tasks)
+                    def process_batch():
+                        for figure in batch:
+                            api_client.log_figure(figure)
 
-                    await process_batch()
+                    process_batch()
 
-            tasks.append(upload_figures_in_batches())
+            upload_figures_in_batches()
 
         if self.description:
             revision_name = (
@@ -596,19 +586,15 @@
                 else DEFAULT_REVISION_NAME
             )
 
-            tasks.append(
-                update_metadata(
-                    content_id=(
-                        f"{content_id}::{revision_name}"
-                        if content_id
-                        else f"test_description:{self.result_id}::{revision_name}"
-                    ),
-                    text=self.description,
-                )
+            update_metadata(
+                content_id=(
+                    f"{content_id}::{revision_name}"
+                    if content_id
+                    else f"test_description:{self.result_id}::{revision_name}"
+                ),
+                text=self.description,
             )
 
-        return await asyncio.gather(*tasks)
-
     def log(  # noqa: C901
         self,
         section_id: str = None,
@@ -653,8 +639,7 @@
         if section_id:
             self._validate_section_id_for_block(section_id, position)
 
-        run_async(
-            self.log_async,
+        self.log_sync(
             section_id=section_id,
             content_id=content_id,
             position=position,
@@ -771,15 +756,13 @@ def serialize(self):
             "metadata": self.metadata,
         }
 
-    async def log_async(
+    def log_sync(
         self,
         content_id: str = None,
     ):
-        return await asyncio.gather(
-            update_metadata(
-                content_id=f"{content_id}",
-                text=self.description,
-            )
+        return update_metadata(
+            content_id=f"{content_id}",
+            text=self.description,
         )
 
     def log(
@@ -811,7 +794,6 @@ def log(
         except Exception as e:
             logger.warning(f"PII detection failed for description: {e}")
 
-        run_async(
-            self.log_async,
+        self.log_sync(
             content_id=content_id,
         )
diff --git a/validmind/vm_models/result/utils.py b/validmind/vm_models/result/utils.py
index 508aac46d..e81d7938d 100644
--- a/validmind/vm_models/result/utils.py
+++ b/validmind/vm_models/result/utils.py
@@ -35,7 +35,7 @@ def get_result_template():
     return _result_template
 
 
-async def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):
+def update_metadata(content_id: str, text: str, _json: Union[Dict, List] = None):
     """Create or update a metadata object."""
     parts = content_id.split("::")
     content_id = parts[0]
@@ -46,7 +46,7 @@
 
     logger.debug(f"Updating metadata for `{content_id}`")
 
-    await api_client.alog_metadata(content_id, text, _json)
+    api_client.log_metadata(content_id, text, _json)
 
 
 def tables_to_widgets(tables: List["ResultTable"]):
diff --git a/validmind/vm_models/test_suite/runner.py b/validmind/vm_models/test_suite/runner.py
index 145be09cd..2239c0f1b 100644
--- a/validmind/vm_models/test_suite/runner.py
+++ b/validmind/vm_models/test_suite/runner.py
@@ -2,13 +2,11 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
 
-import asyncio
-
 import ipywidgets as widgets
 from IPython.display import display
 
 from ...logging import get_logger
-from ...utils import is_notebook, run_async, run_async_check
+from ...utils import is_notebook
 from .summary import TestSuiteSummary
 from .test_suite import TestSuite
 
@@ -75,7 +73,7 @@ def _stop_progress_bar(self):
         self.pbar_description.value = "Test suite complete!"
         self.pbar.close()
 
-    async def log_results(self):
+    def log_results(self):
         """Logs the results of the test suite to ValidMind.
 
         This method will be called after the test suite has been run and all results have been
@@ -86,14 +84,13 @@
         )
         tests = [test for section in self.suite.sections for test in section.tests]
 
-        # TODO: use asyncio.gather here for better performance
         for test in tests:
             self.pbar_description.value = (
                 f"Sending result to ValidMind: {test.test_id}..."
             )
 
             try:
-                await test.log_async()
+                test.log_sync()
             except Exception as e:
                 self.pbar_description.value = "Failed to send result to ValidMind"
                 logger.error(f"Failed to log result: {test.result}")
@@ -102,15 +99,9 @@
 
             self.pbar.value += 1
 
-    async def _check_progress(self):
-        done = False
-
-        while not done:
-            if self.pbar.value == self.pbar.max:
-                self.pbar_description.value = "Test suite complete!"
-                done = True
-
-            await asyncio.sleep(0.5)
+    def _check_progress(self):
+        if self.pbar.value == self.pbar.max:
+            self.pbar_description.value = "Test suite complete!"
 
     def summarize(self, show_link: bool = True):
         if not is_notebook():
@@ -147,8 +138,8 @@ def run(self, send: bool = True, fail_fast: bool = False):
                 self.pbar.value += 1
 
         if send:
-            run_async(self.log_results)
-            run_async_check(self._check_progress)
+            self.log_results()
+            self._check_progress()
 
         self.summarize(show_link=send)
diff --git a/validmind/vm_models/test_suite/test.py b/validmind/vm_models/test_suite/test.py
index 2c4687230..ea476ef48 100644
--- a/validmind/vm_models/test_suite/test.py
+++ b/validmind/vm_models/test_suite/test.py
@@ -110,10 +110,10 @@ def run_test_with_logging():
                 result_id=self.test_id,
             )
 
-    async def log_async(self):
+    def log_sync(self):
         """Log the result for this test to ValidMind."""
         if not self.result:
             raise ValueError("Cannot log test result before running the test")
 
         if isinstance(self.result, TestResult):
-            return await self.result.log_async()
+            return self.result.log_sync()
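
Reviewer note on the multipart conversion: aiohttp's FormData is replaced in _post by requests' files= parameter, which takes (filename, fileobj, content_type) tuples next to a plain dict of form fields. Below is a minimal sketch of that mapping; the echo endpoint and field names are illustrative only, not part of the ValidMind API. It also shows why the timeout is passed per request: a bare requests.Session has no timeout setting of its own.

    from io import BytesIO

    import requests

    session = requests.Session()

    # Plain form fields travel in `data`; binary payloads travel in `files`.
    # Each `files` value is a (filename, fileobj, content_type) tuple, matching
    # the Tuple[str, BytesIO, str] annotation on _post.
    form_fields = {"key": "figure-1", "type": "plot"}
    file_parts = {"image": ("figure-1.png", BytesIO(b"fake-png-bytes"), "image/png")}

    r = session.post(
        "https://httpbin.org/post",  # illustrative echo endpoint
        data=form_fields,
        files=file_parts,
        timeout=30,  # requests applies timeouts per request, not per session
    )
    r.raise_for_status()
    print(r.json()["form"])  # the echo service reflects the form fields back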
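Reviewer note on the test pattern: patching requests.Session.post works because every call in api_client is routed through the shared Session from _get_session, so call_args can be asserted directly with no event loop plumbing. A self-contained sketch of the pattern follows; this MockResponse is a stand-in for the one already defined in tests/test_api_client.py, and the URL is hypothetical.

    import json
    import unittest
    from unittest.mock import MagicMock, patch

    import requests


    class MockResponse:
        """Bare-bones stand-in for requests.Response."""

        def __init__(self, status_code, text=None, json=None):
            self.status_code = status_code
            self.text = text
            self._json = json

        def json(self):
            return self._json


    class TestSessionPost(unittest.TestCase):
        @patch("requests.Session.post")
        def test_post_sends_json(self, mock_post: MagicMock):
            mock_post.return_value = MockResponse(200, json={"cuid": "abc1234"})

            session = requests.Session()
            payload = json.dumps({"key": "value"})
            r = session.post("https://example.invalid/log_metadata", data=payload)

            # The patched method records arguments exactly as they were passed.
            self.assertEqual(r.json(), {"cuid": "abc1234"})
            mock_post.assert_called_once_with(
                "https://example.invalid/log_metadata", data=payload
            )


    if __name__ == "__main__":
        unittest.main()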
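Reviewer note on the public API: with run_async and nest-asyncio gone, the logging helpers are ordinary blocking calls usable from scripts and notebooks alike. A minimal usage sketch, assuming init() can pick up its configuration from the environment as the tests do; the key, host, and model values are placeholders, and the metric values are illustrative.

    import os

    # Placeholders only; set real values outside the process in practice.
    os.environ.setdefault("VM_API_KEY", "your_api_key")
    os.environ.setdefault("VM_API_HOST", "https://your-validmind-host/api/v1/tracking")
    os.environ.setdefault("VM_API_MODEL", "your_model_cuid")

    from validmind import api_client

    # Pings the API; _ping() now also returns the client info instead of dropping it.
    api_client.init()

    # Each call blocks until the API responds -- no event loop required.
    api_client.log_metric(key="auc", value=0.91, params={"dataset": "validation"})
    api_client.log_metadata("model_overview", text="<p>Example description</p>")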