diff --git a/validmind/__init__.py b/validmind/__init__.py
index c99f3a537..216c26d20 100644
--- a/validmind/__init__.py
+++ b/validmind/__init__.py
@@ -53,6 +53,7 @@
     run_documentation_tests,
     run_test_suite,
 )
+from .experimental import agents as experimental_agent
 from .tests.decorator import tags, tasks, test
 from .tests.run import print_env
 from .utils import is_notebook, parse_version
@@ -126,4 +127,6 @@ def check_version():
     "unit_metrics",
     "test_suites",
     "log_text",
+    # experimental features
+    "experimental_agent",
 ]
diff --git a/validmind/experimental/__init__.py b/validmind/experimental/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/validmind/experimental/agents.py b/validmind/experimental/agents.py
new file mode 100644
index 000000000..4b16eb792
--- /dev/null
+++ b/validmind/experimental/agents.py
@@ -0,0 +1,65 @@
+# Copyright © 2023-2024 ValidMind Inc. All rights reserved.
+# See the LICENSE file in the root of this repository for details.
+# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
+
+"""
+Agent interface for all text generation tasks
+"""
+
+import requests
+
+from validmind.api_client import _get_api_headers, _get_url, raise_api_error
+from validmind.utils import is_html, md_to_html
+from validmind.vm_models.result import TextGenerationResult
+
+
+def run_task(
+    task: str,
+    input: dict,
+    show: bool = True,
+) -> TextGenerationResult:
+    """
+    Run text generation tasks using AI models.
+
+    Args:
+        task (str): Type of text generation task to run. Currently supports:
+            - 'code_explainer': Generates natural language explanations of code
+        input (dict): Input parameters for the generation task:
+            - For code_explainer: Must contain 'source_code' and optional parameters
+        show (bool): Whether to display the generated result. Defaults to True.
+
+    Returns:
+        TextGenerationResult: Result object containing the generated text and metadata
+
+    Raises:
+        ValueError: If an unsupported task is provided
+        requests.exceptions.RequestException: If the API request fails
+    """
+    if task == "code_explainer":
+        r = requests.post(
+            url=_get_url("ai/generate/code_explainer"),
+            headers=_get_api_headers(),
+            json=input,
+        )
+
+        if r.status_code != 200:
+            raise_api_error(r.text)
+
+        generated_text = r.json()["content"]
+    else:
+        raise ValueError(f"Unsupported task: {task}")
+
+    if not is_html(generated_text):
+        generated_text = md_to_html(generated_text, mathml=True)
+
+    # Create a test result with the generated text
+    result = TextGenerationResult(
+        result_type=task,
+        description=generated_text,
+        title=f"Text Generation: {task}",
+        doc=f"Generated {task}",
+    )
+    if show:
+        result.show()
+
+    return result
diff --git a/validmind/vm_models/result/__init__.py b/validmind/vm_models/result/__init__.py
index aca6c17e6..a092c4da9 100644
--- a/validmind/vm_models/result/__init__.py
+++ b/validmind/vm_models/result/__init__.py
@@ -2,6 +2,20 @@
 # See the LICENSE file in the root of this repository for details.
 # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
 
-from .result import ErrorResult, RawData, Result, ResultTable, TestResult
+from .result import (
+    ErrorResult,
+    RawData,
+    Result,
+    ResultTable,
+    TestResult,
+    TextGenerationResult,
+)
 
-__all__ = ["ErrorResult", "RawData", "Result", "ResultTable", "TestResult"]
+__all__ = [
+    "ErrorResult",
+    "RawData",
+    "Result",
+    "ResultTable",
+    "TestResult",
+    "TextGenerationResult",
+]
diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py
index ba34bcd7a..8cab8cddc 100644
--- a/validmind/vm_models/result/result.py
+++ b/validmind/vm_models/result/result.py
@@ -129,6 +129,7 @@ class Result:
 
     result_id: str = None
     name: str = None
+    result_type: str = None
 
     def __str__(self) -> str:
         """May be overridden by subclasses."""
@@ -445,6 +446,7 @@ def serialize(self):
     async def log_async(
         self,
         section_id: str = None,
+        content_id: str = None,
         position: int = None,
         config: Dict[str, bool] = None,
     ):
@@ -477,7 +479,6 @@
         tasks.extend(
             [api_client.alog_figure(figure) for figure in (self.figures or [])]
         )
-
         if self.description:
             revision_name = (
                 AI_REVISION_NAME
@@ -485,18 +486,19 @@
                 else DEFAULT_REVISION_NAME
             )
 
-            tasks.append(
-                update_metadata(
-                    content_id=f"test_description:{self.result_id}::{revision_name}",
-                    text=self.description,
-                )
+        tasks.append(
+            update_metadata(
+                content_id=f"{content_id}:{revision_name}",
+                text=self.description,
             )
+        )
 
         return await asyncio.gather(*tasks)
 
     def log(
         self,
         section_id: str = None,
+        content_id: str = None,
         position: int = None,
         unsafe: bool = False,
         config: Dict[str, bool] = None,
@@ -506,6 +508,7 @@ def log(
         Args:
             section_id (str): The section ID within the model document to insert the
                 test result.
+            content_id (str): The content ID to log the result to.
             position (int): The position (index) within the section to insert the test
                 result.
             unsafe (bool): If True, log the result even if it contains sensitive data
@@ -533,6 +536,7 @@
         run_async(
             self.log_async,
             section_id=section_id,
+            content_id=content_id,
             position=position,
             config=config,
         )
@@ -568,3 +572,106 @@ def validate_log_config(self, config: Dict[str, bool]):
             raise InvalidParameterError(
                 f"Values for config keys must be boolean. Non-boolean values found for keys: {', '.join(non_bool_keys)}"
             )
+
+
+@dataclass
+class TextGenerationResult(Result):
+    """Text generation result."""
+
+    name: str = "Text Generation Result"
+    ref_id: str = None
+    title: Optional[str] = None
+    doc: Optional[str] = None
+    description: Optional[Union[str, DescriptionFuture]] = None
+    params: Optional[Dict[str, Any]] = None
+    metadata: Optional[Dict[str, Any]] = None
+    _was_description_generated: bool = False
+
+    def __post_init__(self):
+        if self.ref_id is None:
+            self.ref_id = str(uuid4())
+
+    def __repr__(self) -> str:
+        attrs = [
+            attr
+            for attr in [
+                "doc",
+                "description",
+                "params",
+            ]
+            if getattr(self, attr) is not None
+            and (
+                len(getattr(self, attr)) > 0
+                if isinstance(getattr(self, attr), list)
+                else True
+            )
+        ]
+
+        return f'TextGenerationResult("{self.result_id}", {", ".join(attrs)})'
+
+    def __getattribute__(self, name):
+        # lazy load description if it's a DescriptionFuture (generated in background)
+        if name == "description":
+            description = super().__getattribute__("description")
+
+            if isinstance(description, DescriptionFuture):
+                self._was_description_generated = True
+                self.description = description.get_description()
+
+        return super().__getattribute__(name)
+
+    @property
+    def test_name(self) -> str:
+        """Get the test name, using the custom title if available."""
+        return self.title or test_id_to_name(self.result_id)
+
+    def to_widget(self):
+        template_data = {
+            "test_name": self.test_name,
+            "description": self.description.replace("h3", "strong"),
+            "params": (
+                json.dumps(self.params, cls=NumpyEncoder, indent=2)
+                if self.params
+                else None
+            ),
+        }
+        rendered = get_result_template().render(**template_data)
+
+        widgets = [HTML(rendered)]
+
+        return VBox(widgets)
+
+    def serialize(self):
+        """Serialize the result for the API."""
+        return {
+            "test_name": self.result_id,
+            "title": self.title,
+            "ref_id": self.ref_id,
+            "params": self.params,
+            "metadata": self.metadata,
+        }
+
+    async def log_async(
+        self,
+        content_id: str = None,
+    ):
+        return await asyncio.gather(
+            update_metadata(
+                content_id=content_id,
+                text=self.description,
+            )
+        )
+
+    def log(
+        self,
+        content_id: str = None,
+    ):
+        """Log the result to ValidMind.
+
+        Args:
+            content_id (str): The content ID to log the result to.
+        """
+        run_async(
+            self.log_async,
+            content_id=content_id,
+        )
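
Usage note (not part of the patch): a minimal sketch of how the new experimental surface
is meant to be exercised, based only on the signatures added above. It assumes the client
has already been initialized via vm.init() with valid API credentials; the content ID and
the sample source code are hypothetical, and `input` keys beyond "source_code" are not
spelled out in this diff.

    import validmind as vm

    # vm.init(api_key=..., api_secret=..., model=...)  # assumed prior setup

    # Ask the backend code-explainer task for a natural-language explanation.
    # Per the run_task() docstring, `input` must contain 'source_code' for this task.
    result = vm.experimental_agent.run_task(
        task="code_explainer",
        input={"source_code": "def add(a, b):\n    return a + b"},
        show=False,  # skip the inline widget; we only want to log the result
    )

    # Unlike TestResult.log(), TextGenerationResult.log() takes only content_id
    # (no section_id/position) and writes the generated text to that content block.
    result.log(content_id="code_explainer:add_function")  # hypothetical content ID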