diff --git a/tests/test_api_client.py b/tests/test_api_client.py
index 3920cdc2f..7e2a9d7b6 100644
--- a/tests/test_api_client.py
+++ b/tests/test_api_client.py
@@ -201,6 +201,7 @@ def test_log_test_result(self, mock_post):
             "inputs": ["input1"],
             "passed": True,
             "summary": [{"key": "value"}],
+            "config": None,
         }

         mock_post.return_value = MockAsyncResponse(200, json={"cuid": "abc1234"})
diff --git a/validmind/api_client.py b/validmind/api_client.py
index cb6228616..27c167b6f 100644
--- a/validmind/api_client.py
+++ b/validmind/api_client.py
@@ -330,6 +330,8 @@ async def alog_test_result(
     result: Dict[str, Any],
     section_id: str = None,
     position: int = None,
+    unsafe: bool = False,
+    config: Dict[str, bool] = None,
 ) -> Dict[str, Any]:
     """Logs test results information

@@ -357,7 +359,7 @@ async def alog_test_result(
         "log_test_results",
         params=request_params,
         data=json.dumps(
-            result,
+            {**result, "config": config},
             cls=NumpyEncoder,
             allow_nan=False,
         ),
@@ -458,24 +460,26 @@ def log_metric(
     recorded_at: Optional[str] = None,
     thresholds: Optional[Dict[str, Any]] = None,
 ):
-    """Logs a unit metric
-
-    Unit metrics are key-value pairs where the key is the metric name and the value is
-    a scalar (int or float). These key-value pairs are associated with the currently
-    selected model (inventory model in the ValidMind Platform) and keys can be logged
-    to over time to create a history of the metric. On the ValidMind Platform, these metrics
-    will be used to create plots/visualizations for documentation and dashboards etc.
+    """Log a unit metric

     Args:
         key (str): The metric key
-        value (float): The metric value
-        inputs (list, optional): A list of input IDs that were used to compute the metric.
-        params (dict, optional): Dictionary of parameters used to compute the metric.
-        recorded_at (str, optional): The timestamp of the metric. Server will use
-            current time if not provided.
-        thresholds (dict, optional): Dictionary of thresholds for the metric.
+        value (Union[int, float]): The metric value
+        inputs (List[str], optional): List of input IDs used to compute the metric
+        params (Dict[str, Any], optional): Parameters used to compute the metric
+        recorded_at (str, optional): Timestamp of the metric; the server uses the
+            current time if not provided
+        thresholds (Dict[str, Any], optional): Thresholds for the metric
     """
-    run_async(alog_metric, key, value, inputs, params, recorded_at, thresholds)
+    return run_async(
+        alog_metric,
+        key=key,
+        value=value,
+        inputs=inputs,
+        params=params,
+        recorded_at=recorded_at,
+        thresholds=thresholds,
+    )


 def get_ai_key() -> Dict[str, Any]:
diff --git a/validmind/errors.py b/validmind/errors.py
index a7db65c49..80183311e 100644
--- a/validmind/errors.py
+++ b/validmind/errors.py
@@ -129,6 +129,14 @@ class InvalidInputError(BaseError):
     pass


+class InvalidParameterError(BaseError):
+    """
+    When an invalid parameter is provided.
+    """
+
+    pass
+
+
 class InvalidTextObjectError(APIRequestError):
     """
     When an invalid Metadat (Text) object is sent to the API.
diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py
index 382a24996..b2fa597d3 100644
--- a/validmind/vm_models/result/result.py
+++ b/validmind/vm_models/result/result.py
@@ -19,6 +19,7 @@
 from ... import api_client
 from ...ai.utils import DescriptionFuture
+from ...errors import InvalidParameterError
 from ...logging import get_logger
 from ...utils import (
     HumanReadableEncoder,
@@ -423,10 +424,16 @@ def serialize(self):
         }

     async def log_async(
-        self, section_id: str = None, position: int = None, unsafe: bool = False
+        self,
+        section_id: str = None,
+        position: int = None,
+        config: Dict[str, bool] = None,
     ):
         tasks = []  # collect tasks to run in parallel (async)

+        # Default empty dict if None
+        config = config or {}
+
         if self.metric is not None:
             # metrics are logged as separate entities
             tasks.append(
@@ -444,6 +451,7 @@
                     result=self.serialize(),
                     section_id=section_id,
                     position=position,
+                    config=config,
                 )
             )

@@ -467,7 +475,13 @@

         return await asyncio.gather(*tasks)

-    def log(self, section_id: str = None, position: int = None, unsafe: bool = False):
+    def log(
+        self,
+        section_id: str = None,
+        position: int = None,
+        unsafe: bool = False,
+        config: Dict[str, bool] = None,
+    ):
         """Log the result to ValidMind

         Args:
@@ -477,7 +491,16 @@ def log(self, section_id: str = None, position: int = None, unsafe: bool = False
                 result
             unsafe (bool): If True, log the result even if it contains sensitive data
                 i.e. raw data from input datasets
+            config (Dict[str, bool]): Configuration options for displaying the test result.
+                Available config options:
+                - hideTitle: Hide the title in the document view
+                - hideText: Hide the description text in the document view
+                - hideParams: Hide the parameters in the document view
+                - hideTables: Hide tables in the document view
+                - hideFigures: Hide figures in the document view
         """
+        if config:
+            self.validate_log_config(config)

         self.check_result_id_exist()

@@ -488,4 +511,41 @@ def log(self, section_id: str = None, position: int = None, unsafe: bool = False
         if section_id:
             self._validate_section_id_for_block(section_id, position)

-        run_async(self.log_async, section_id=section_id, position=position)
+        run_async(
+            self.log_async,
+            section_id=section_id,
+            position=position,
+            config=config,
+        )
+
+    def validate_log_config(self, config: Dict[str, bool]):
+        """Validate the configuration options for logging a test result
+
+        Args:
+            config (Dict[str, bool]): Configuration options to validate
+
+        Raises:
+            InvalidParameterError: If config contains invalid keys or non-boolean values
+        """
+        valid_keys = {
+            "hideTitle",
+            "hideText",
+            "hideParams",
+            "hideTables",
+            "hideFigures",
+        }
+        invalid_keys = set(config.keys()) - valid_keys
+        if invalid_keys:
+            raise InvalidParameterError(
+                f"Invalid config keys: {', '.join(invalid_keys)}. "
+                f"Valid keys are: {', '.join(valid_keys)}"
+            )
+
+        # Ensure all values are boolean
+        non_bool_keys = [
+            key for key, value in config.items() if not isinstance(value, bool)
+        ]
+        if non_bool_keys:
+            raise InvalidParameterError(
+                f"Values for config keys must be boolean. Non-boolean values found for keys: {', '.join(non_bool_keys)}"
+            )
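
Usage sketch (illustrative, not part of the diff): exercising the new `config` option on a test result's `log()` method. The `run_test` call, test ID, and dataset variable are placeholders assumed for the example; only the `config` keys and the `InvalidParameterError` behaviour are defined by the change above.

```python
import validmind as vm
from validmind.errors import InvalidParameterError

# Placeholder: obtain a test result the usual way (test ID and inputs are
# illustrative and not part of this change).
result = vm.tests.run_test(
    "validmind.data_validation.ClassImbalance",
    inputs={"dataset": my_dataset},  # assumes a dataset registered via vm.init_dataset
)

# Hide the parameter table and figures for this block in the document view.
result.log(config={"hideParams": True, "hideFigures": True})

# Unknown keys or non-boolean values are rejected before anything is sent to the API.
try:
    result.log(config={"hideEverything": "yes"})
except InvalidParameterError as exc:
    print(exc)
```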
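
Similarly, a minimal sketch of `log_metric` after this change, assuming it remains exported at the package top level (`vm.log_metric`) and forwards `recorded_at` and `thresholds` through to `alog_metric` as in the hunk above; the key, value, and threshold shown are illustrative.

```python
import validmind as vm

# Log a scalar metric for the currently selected model; inputs, params,
# recorded_at, and thresholds are all optional.
vm.log_metric(
    key="AUC",
    value=0.87,
    inputs=["test_dataset"],      # illustrative input ID
    thresholds={"min_auc": 0.8},  # illustrative threshold
)
```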