From 96a452601d625f44dea4bd66320d167ec596e64d Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Thu, 13 Mar 2025 13:55:04 +0000 Subject: [PATCH 1/6] support config option for logging test results --- validmind/api_client.py | 4 +- validmind/errors.py | 8 +++ validmind/vm_models/result/result.py | 78 ++++++++++++++++++++++++++-- 3 files changed, 86 insertions(+), 4 deletions(-) diff --git a/validmind/api_client.py b/validmind/api_client.py index cb6228616..379c809aa 100644 --- a/validmind/api_client.py +++ b/validmind/api_client.py @@ -330,6 +330,8 @@ async def alog_test_result( result: Dict[str, Any], section_id: str = None, position: int = None, + unsafe: bool = False, + config: Dict[str, bool] = None, ) -> Dict[str, Any]: """Logs test results information @@ -357,7 +359,7 @@ async def alog_test_result( "log_test_results", params=request_params, data=json.dumps( - result, + {**result, "config": config}, cls=NumpyEncoder, allow_nan=False, ), diff --git a/validmind/errors.py b/validmind/errors.py index a7db65c49..80183311e 100644 --- a/validmind/errors.py +++ b/validmind/errors.py @@ -129,6 +129,14 @@ class InvalidInputError(BaseError): pass +class InvalidParameterError(BaseError): + """ + When an invalid parameter is provided. + """ + + pass + + class InvalidTextObjectError(APIRequestError): """ When an invalid Metadat (Text) object is sent to the API. diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py index 382a24996..c7b79c7ab 100644 --- a/validmind/vm_models/result/result.py +++ b/validmind/vm_models/result/result.py @@ -19,6 +19,7 @@ from ... 
import api_client from ...ai.utils import DescriptionFuture +from ...errors import InvalidParameterError from ...logging import get_logger from ...utils import ( HumanReadableEncoder, @@ -423,10 +424,17 @@ def serialize(self): } async def log_async( - self, section_id: str = None, position: int = None, unsafe: bool = False + self, + section_id: str = None, + position: int = None, + unsafe: bool = False, + config: Dict[str, bool] = None, ): tasks = [] # collect tasks to run in parallel (async) + # Default empty dict if None + config = config or {} + if self.metric is not None: # metrics are logged as separate entities tasks.append( @@ -439,11 +447,22 @@ async def log_async( ) if self.tables or self.figures: + # # Include config in the serialized result instead of as a separate parameter + # result_data = self.serialize() + # if config: + # # Add config to metadata if it exists, or create metadata + # if "metadata" not in result_data or result_data["metadata"] is None: + # result_data["metadata"] = {} + # result_data["metadata"]["display_config"] = config + tasks.append( api_client.alog_test_result( + # result=result_data, result=self.serialize(), section_id=section_id, position=position, + unsafe=unsafe, + config=config, ) ) @@ -467,7 +486,13 @@ async def log_async( return await asyncio.gather(*tasks) - def log(self, section_id: str = None, position: int = None, unsafe: bool = False): + def log( + self, + section_id: str = None, + position: int = None, + unsafe: bool = False, + config: Dict[str, bool] = None, + ): """Log the result to ValidMind Args: @@ -477,7 +502,16 @@ def log(self, section_id: str = None, position: int = None, unsafe: bool = False result unsafe (bool): If True, log the result even if it contains sensitive data i.e. raw data from input datasets + config (Dict[str, bool]): Configuration options for displaying the test result. 
+ Available config options: + - hideTitle: Hide the title in the document view + - hideText: Hide the description text in the document view + - hideParams: Hide the parameters in the document view + - hideTables: Hide tables in the document view + - hideFigures: Hide figures in the document view """ + if config: + self.validate_log_config(config) self.check_result_id_exist() @@ -488,4 +522,42 @@ def log(self, section_id: str = None, position: int = None, unsafe: bool = False if section_id: self._validate_section_id_for_block(section_id, position) - run_async(self.log_async, section_id=section_id, position=position) + run_async( + self.log_async, + section_id=section_id, + position=position, + unsafe=unsafe, + config=config, + ) + + def validate_log_config(self, config: Dict[str, bool]): + """Validate the configuration options for logging a test result + + Args: + config (Dict[str, bool]): Configuration options to validate + + Raises: + InvalidParameterError: If config contains invalid keys or non-boolean values + """ + valid_keys = { + "hideTitle", + "hideText", + "hideParams", + "hideTables", + "hideFigures", + } + invalid_keys = set(config.keys()) - valid_keys + if invalid_keys: + raise InvalidParameterError( + f"Invalid config keys: {', '.join(invalid_keys)}. " + f"Valid keys are: {', '.join(valid_keys)}" + ) + + # Ensure all values are boolean + non_bool_keys = [ + key for key, value in config.items() if not isinstance(value, bool) + ] + if non_bool_keys: + raise InvalidParameterError( + f"Values for config keys must be boolean. 
Non-boolean values found for keys: {', '.join(non_bool_keys)}" + ) From aea59c6bc7e2ec5bd734ae284776d94a77f058e2 Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Thu, 13 Mar 2025 13:58:50 +0000 Subject: [PATCH 2/6] remove commented code --- validmind/vm_models/result/result.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py index c7b79c7ab..24c741841 100644 --- a/validmind/vm_models/result/result.py +++ b/validmind/vm_models/result/result.py @@ -447,17 +447,8 @@ async def log_async( ) if self.tables or self.figures: - # # Include config in the serialized result instead of as a separate parameter - # result_data = self.serialize() - # if config: - # # Add config to metadata if it exists, or create metadata - # if "metadata" not in result_data or result_data["metadata"] is None: - # result_data["metadata"] = {} - # result_data["metadata"]["display_config"] = config - tasks.append( api_client.alog_test_result( - # result=result_data, result=self.serialize(), section_id=section_id, position=position, From dba9dfc9a46f784f05bd38eebf946b715eba7755 Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Thu, 13 Mar 2025 17:40:16 +0000 Subject: [PATCH 3/6] update the test --- tests/test_api_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_api_client.py b/tests/test_api_client.py index 3920cdc2f..7e2a9d7b6 100644 --- a/tests/test_api_client.py +++ b/tests/test_api_client.py @@ -201,6 +201,7 @@ def test_log_test_result(self, mock_post): "inputs": ["input1"], "passed": True, "summary": [{"key": "value"}], + "config": None, } mock_post.return_value = MockAsyncResponse(200, json={"cuid": "abc1234"}) From 783c385478013ebda79aa03ac793f062d92a8110 Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Fri, 14 Mar 2025 11:11:58 +0000 Subject: [PATCH 4/6] pass config to log_metric --- validmind/api_client.py | 25 +++++++++++-------------- validmind/vm_models/result/result.py | 1 + 
2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/validmind/api_client.py b/validmind/api_client.py index 379c809aa..a95e7f754 100644 --- a/validmind/api_client.py +++ b/validmind/api_client.py @@ -412,6 +412,7 @@ async def alog_metric( value: Union[int, float], inputs: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None, + config: Optional[Dict[str, Any]] = None, recorded_at: Optional[str] = None, thresholds: Optional[Dict[str, Any]] = None, ): @@ -440,6 +441,7 @@ async def alog_metric( "value": value, "inputs": inputs or [], "params": params or {}, + "config": config or {}, "recorded_at": recorded_at, "thresholds": thresholds or {}, }, @@ -457,27 +459,22 @@ def log_metric( value: float, inputs: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None, + config: Optional[Dict[str, Any]] = None, recorded_at: Optional[str] = None, thresholds: Optional[Dict[str, Any]] = None, ): - """Logs a unit metric - - Unit metrics are key-value pairs where the key is the metric name and the value is - a scalar (int or float). These key-value pairs are associated with the currently - selected model (inventory model in the ValidMind Platform) and keys can be logged - to over time to create a history of the metric. On the ValidMind Platform, these metrics - will be used to create plots/visualizations for documentation and dashboards etc. + """Log a metric Args: key (str): The metric key - value (float): The metric value - inputs (list, optional): A list of input IDs that were used to compute the metric. - params (dict, optional): Dictionary of parameters used to compute the metric. - recorded_at (str, optional): The timestamp of the metric. Server will use - current time if not provided. - thresholds (dict, optional): Dictionary of thresholds for the metric. 
+ value (Union[int, float]): The metric value + inputs (List[str], optional): List of input IDs + params (Dict[str, Any], optional): Parameters used to generate the metric + config (Dict[str, bool], optional): Configuration options for displaying the metric """ - run_async(alog_metric, key, value, inputs, params, recorded_at, thresholds) + return run_async( + alog_metric, key=key, value=value, inputs=inputs, params=params, config=config + ) def get_ai_key() -> Dict[str, Any]: diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py index 24c741841..ca8711884 100644 --- a/validmind/vm_models/result/result.py +++ b/validmind/vm_models/result/result.py @@ -443,6 +443,7 @@ async def log_async( value=self.metric, inputs=[input.input_id for input in self._get_flat_inputs()], params=self.params, + config=config, ) ) From 9ccbd1cf15982ae0968a5f751b785be3fb03980e Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Fri, 14 Mar 2025 17:00:39 +0000 Subject: [PATCH 5/6] remove unsafe parameter from logging methods --- validmind/vm_models/result/result.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py index ca8711884..9d41513d4 100644 --- a/validmind/vm_models/result/result.py +++ b/validmind/vm_models/result/result.py @@ -427,7 +427,6 @@ async def log_async( self, section_id: str = None, position: int = None, - unsafe: bool = False, config: Dict[str, bool] = None, ): tasks = [] # collect tasks to run in parallel (async) @@ -453,7 +452,6 @@ async def log_async( result=self.serialize(), section_id=section_id, position=position, - unsafe=unsafe, config=config, ) ) @@ -518,7 +516,6 @@ def log( self.log_async, section_id=section_id, position=position, - unsafe=unsafe, config=config, ) From 5268daca9c4d06b8b19493d2a01599ad8aa43825 Mon Sep 17 00:00:00 2001 From: Anil Sorathiya Date: Mon, 17 Mar 2025 19:59:21 +0000 Subject: [PATCH 6/6] remove config from the log_metric --- 
validmind/api_client.py | 8 +------- validmind/vm_models/result/result.py | 1 - 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/validmind/api_client.py b/validmind/api_client.py index a95e7f754..27c167b6f 100644 --- a/validmind/api_client.py +++ b/validmind/api_client.py @@ -412,7 +412,6 @@ async def alog_metric( value: Union[int, float], inputs: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None, - config: Optional[Dict[str, Any]] = None, recorded_at: Optional[str] = None, thresholds: Optional[Dict[str, Any]] = None, ): @@ -441,7 +440,6 @@ async def alog_metric( "value": value, "inputs": inputs or [], "params": params or {}, - "config": config or {}, "recorded_at": recorded_at, "thresholds": thresholds or {}, }, @@ -459,7 +457,6 @@ def log_metric( value: float, inputs: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None, - config: Optional[Dict[str, Any]] = None, recorded_at: Optional[str] = None, thresholds: Optional[Dict[str, Any]] = None, ): @@ -470,11 +467,8 @@ def log_metric( value (Union[int, float]): The metric value inputs (List[str], optional): List of input IDs params (Dict[str, Any], optional): Parameters used to generate the metric - config (Dict[str, bool], optional): Configuration options for displaying the metric """ - return run_async( - alog_metric, key=key, value=value, inputs=inputs, params=params, config=config - ) + return run_async(alog_metric, key=key, value=value, inputs=inputs, params=params) def get_ai_key() -> Dict[str, Any]: diff --git a/validmind/vm_models/result/result.py b/validmind/vm_models/result/result.py index 9d41513d4..b2fa597d3 100644 --- a/validmind/vm_models/result/result.py +++ b/validmind/vm_models/result/result.py @@ -442,7 +442,6 @@ async def log_async( value=self.metric, inputs=[input.input_id for input in self._get_flat_inputs()], params=self.params, - config=config, ) )