1 change: 1 addition & 0 deletions tests/test_api_client.py
@@ -201,6 +201,7 @@ def test_log_test_result(self, mock_post):
"inputs": ["input1"],
"passed": True,
"summary": [{"key": "value"}],
"config": None,
}

mock_post.return_value = MockAsyncResponse(200, json={"cuid": "abc1234"})
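Review note: because `alog_test_result` now always merges a `config` key into the payload (see `validmind/api_client.py` below), the fixture gains `"config": None`. A minimal sketch of the payload shape the test now asserts, with values taken from the fixture above:

```python
# Sketch only: field values mirror the test fixture; "config" is the new key.
expected_payload = {
    "inputs": ["input1"],
    "passed": True,
    "summary": [{"key": "value"}],
    "config": None,  # None whenever log() is called without a config argument
}
```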
23 changes: 8 additions & 15 deletions validmind/api_client.py
@@ -330,6 +330,8 @@ async def alog_test_result(
result: Dict[str, Any],
section_id: str = None,
position: int = None,
unsafe: bool = False,
config: Dict[str, bool] = None,
) -> Dict[str, Any]:
"""Logs test results information

@@ -357,7 +359,7 @@
"log_test_results",
params=request_params,
data=json.dumps(
- result,
+ {**result, "config": config},
cls=NumpyEncoder,
allow_nan=False,
),
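Review note: the dict-unpacking merge above copies the serialized result and attaches the display config before JSON encoding. A minimal standalone sketch (plain `json.dumps` here; the real call also passes `cls=NumpyEncoder` and `allow_nan=False`):

```python
import json

# Illustrative values; any existing "config" key in `result` would be
# overwritten by the merge, since later keys win in a dict literal.
result = {"passed": True, "summary": [{"key": "value"}]}
config = {"hideTitle": True, "hideFigures": False}

payload = json.dumps({**result, "config": config}, allow_nan=False)
# -> {"passed": true, "summary": [{"key": "value"}], "config": {"hideTitle": true, "hideFigures": false}}
```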
@@ -458,24 +460,15 @@ def log_metric(
recorded_at: Optional[str] = None,
thresholds: Optional[Dict[str, Any]] = None,
):
"""Logs a unit metric

Unit metrics are key-value pairs where the key is the metric name and the value is
a scalar (int or float). These key-value pairs are associated with the currently
selected model (inventory model in the ValidMind Platform) and keys can be logged
to over time to create a history of the metric. On the ValidMind Platform, these metrics
will be used to create plots/visualizations for documentation and dashboards etc.
"""Log a metric

Args:
key (str): The metric key
- value (float): The metric value
- inputs (list, optional): A list of input IDs that were used to compute the metric.
- params (dict, optional): Dictionary of parameters used to compute the metric.
- recorded_at (str, optional): The timestamp of the metric. Server will use
- current time if not provided.
- thresholds (dict, optional): Dictionary of thresholds for the metric.
+ value (Union[int, float]): The metric value
+ inputs (List[str], optional): List of input IDs
+ params (Dict[str, Any], optional): Parameters used to generate the metric
"""
- run_async(alog_metric, key, value, inputs, params, recorded_at, thresholds)
+ return run_async(alog_metric, key=key, value=value, inputs=inputs, params=params)


def get_ai_key() -> Dict[str, Any]:
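Review note: the wrapper now forwards keyword arguments explicitly. Worth flagging that `recorded_at` and `thresholds` remain in the signature but are no longer passed through to `alog_metric`. A hedged usage sketch (metric key and values are illustrative):

```python
from validmind.api_client import log_metric

# Logs a scalar metric against the currently selected model; the same key can
# be logged repeatedly over time to build a metric history.
log_metric(
    key="auc",                # illustrative metric key
    value=0.91,
    inputs=["test_dataset"],  # illustrative input ID
    params={"threshold": 0.5},
)
```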
8 changes: 8 additions & 0 deletions validmind/errors.py
@@ -129,6 +129,14 @@ class InvalidInputError(BaseError):
pass


class InvalidParameterError(BaseError):
"""
When an invalid parameter is provided.
"""

pass


class InvalidTextObjectError(APIRequestError):
"""
When an invalid Metadata (Text) object is sent to the API.
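Review note: a sketch of how the new error surfaces to callers. `result` stands in for any test result object with the new `log` method (see `validmind/vm_models/result/result.py` below):

```python
from validmind.errors import InvalidParameterError

try:
    result.log(config={"hideEverything": True})  # not a recognized config key
except InvalidParameterError as err:
    print(err)
    # Invalid config keys: hideEverything. Valid keys are: hideTitle, hideText,
    # hideParams, hideTables, hideFigures (set ordering may vary)
```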
66 changes: 63 additions & 3 deletions validmind/vm_models/result/result.py
@@ -19,6 +19,7 @@

from ... import api_client
from ...ai.utils import DescriptionFuture
from ...errors import InvalidParameterError
from ...logging import get_logger
from ...utils import (
HumanReadableEncoder,
@@ -423,10 +424,16 @@ def serialize(self):
}

async def log_async(
- self, section_id: str = None, position: int = None, unsafe: bool = False
+ self,
+ section_id: str = None,
+ position: int = None,
+ config: Dict[str, bool] = None,
):
tasks = [] # collect tasks to run in parallel (async)

# Default empty dict if None
config = config or {}

if self.metric is not None:
# metrics are logged as separate entities
tasks.append(
@@ -444,6 +451,7 @@ def log_async(
result=self.serialize(),
section_id=section_id,
position=position,
config=config,
)
)

@@ -467,7 +475,13 @@

return await asyncio.gather(*tasks)

- def log(self, section_id: str = None, position: int = None, unsafe: bool = False):
+ def log(
+ self,
+ section_id: str = None,
+ position: int = None,
+ unsafe: bool = False,
+ config: Dict[str, bool] = None,
+ ):
"""Log the result to ValidMind

Args:
@@ -477,7 +491,16 @@ def log(self, section_id: str = None, position: int = None, unsafe: bool = False
result
unsafe (bool): If True, log the result even if it contains sensitive data
i.e. raw data from input datasets
config (Dict[str, bool]): Configuration options for displaying the test result.
Available config options:
- hideTitle: Hide the title in the document view
- hideText: Hide the description text in the document view
- hideParams: Hide the parameters in the document view
- hideTables: Hide tables in the document view
- hideFigures: Hide figures in the document view
"""
if config:
self.validate_log_config(config)

self.check_result_id_exist()

@@ -488,4 +511,41 @@
if section_id:
self._validate_section_id_for_block(section_id, position)

- run_async(self.log_async, section_id=section_id, position=position)
+ run_async(
+ self.log_async,
+ section_id=section_id,
+ position=position,
+ config=config,
+ )

def validate_log_config(self, config: Dict[str, bool]):
"""Validate the configuration options for logging a test result

Args:
config (Dict[str, bool]): Configuration options to validate

Raises:
InvalidParameterError: If config contains invalid keys or non-boolean values
"""
valid_keys = {
"hideTitle",
"hideText",
"hideParams",
"hideTables",
"hideFigures",
}
invalid_keys = set(config.keys()) - valid_keys
if invalid_keys:
raise InvalidParameterError(
f"Invalid config keys: {', '.join(invalid_keys)}. "
f"Valid keys are: {', '.join(valid_keys)}"
)

# Ensure all values are boolean
non_bool_keys = [
key for key, value in config.items() if not isinstance(value, bool)
]
if non_bool_keys:
raise InvalidParameterError(
f"Values for config keys must be boolean. Non-boolean values found for keys: {', '.join(non_bool_keys)}"
)