From 712b007b58b57404ba517ed9283f16fa4001cd27 Mon Sep 17 00:00:00 2001 From: John Walz Date: Fri, 3 Jan 2025 14:13:06 -0500 Subject: [PATCH 01/13] feat: wip updating script for adding descriptions to tests --- scripts/add_test_description.py | 223 ++------------------------------ 1 file changed, 12 insertions(+), 211 deletions(-) diff --git a/scripts/add_test_description.py b/scripts/add_test_description.py index 8eb4f345e..816782188 100644 --- a/scripts/add_test_description.py +++ b/scripts/add_test_description.py @@ -32,7 +32,9 @@ below that will document the tests for developers and risk management teams. For each test you will return a description with the following format and sections: - + + + 1. Purpose 2. Test Mechanism @@ -51,14 +53,14 @@ **Test Mechanism**: This test calculates the PSI by: -- Bucketing the distributions of each feature in both datasets. -- Comparing the percentage of observations in each bucket between the two datasets. +- Bucketing the distributions of each feature in both datasets. +- Comparing the percentage of observations in each bucket between the two datasets. - Aggregating the differences across all buckets for each feature to produce the PSI score for that feature. The PSI score is interpreted as: -- PSI < 0.1: No significant population change. -- PSI < 0.2: Moderate population change. +- PSI < 0.1: No significant population change. +- PSI < 0.2: Moderate population change. - PSI >= 0.2: Significant population change. **Signs of High Risk**: @@ -85,104 +87,11 @@ 5. Limitations: List or describe the limitations or disadvantages of this test, including any potential bias or areas it might not fully address Ensure that each section is populated with succinct, clear, and relevant information pertaining to the test. -Respond with a markdown description where each section name is in header 3 format and then the content for that section. -Make sure to also remove the colon from the end of the header 3 section names and add a line break in between the section name and the section content. -For sections 1-2, make sure the content is in full sentences and paragraph form. -For sections 3-5, the content should be a list of bullet points unless the section has only one or two items, in which case it can be a paragraph. -Respond only with the description and don't include any explanation or other text. Additionally, avoid using enclosing markdown syntax like ```markdown -""".strip() - -fix_prompt = """ -You are an expert in validating Machine Learning models using MRM (Model Risk Management) best practices. -You are also an expert in writing descriptions that are pleasant to read while being very useful. -You will be provided the source code for a test that is run against an ML model. -You will analyze the code to determine the details and implementation of the test. -Finally, you will write clear, descriptive and informative descriptions in the format described -below that will document the tests for developers and risk management teams. - -For each test you will return a description with the following format and sections: - - -1. Purpose -2. Test Mechanism -3. Signs of High Risk -4. Strengths -5. Limitations - -Example description for a "Feature Drift" test: - -``` -Evaluates changes in feature distribution over time to identify potential model drift. - -**Purpose**: -The Feature Drift test aims to evaluate how much the distribution of features has shifted over time between two datasets, typically training and monitoring datasets. 
It uses the Population Stability Index (PSI) to quantify this change, providing insights into the model’s robustness and the necessity for retraining or feature engineering. - -**Test Mechanism**: -This test calculates the PSI by: - -- Bucketing the distributions of each feature in both datasets. -- Comparing the percentage of observations in each bucket between the two datasets. -- Aggregating the differences across all buckets for each feature to produce the PSI score for that feature. - -The PSI score is interpreted as: - -- PSI < 0.1: No significant population change. -- PSI < 0.2: Moderate population change. -- PSI >= 0.2: Significant population change. - -**Signs of High Risk**: -- PSI >= 0.2 for any feature, indicating a significant distribution shift. -- Consistently high PSI scores across multiple features. -- Sudden spikes in PSI in recent monitoring data compared to historical data. - -**Strengths**: -- Provides a quantitative measure of feature distribution changes. -- Easily interpretable thresholds for decision-making. -- Helps in early detection of data drift, prompting timely interventions. - -**Limitations**: -- May not capture more intricate changes in data distribution nuances. -- Assumes that bucket thresholds (quantiles) adequately represent distribution shifts. -- PSI score interpretation can be overly simplistic for complex datasets. -``` - -These sections have been populated with content according to the following guidelines: -1. Purpose: Brief explanation of why this test is being used and what it is intended to evaluate or measure in relation to the model. -2. Test Mechanism: Describe the methodology used to test or apply the test, including any grading scales or thresholds -3. Signs of High Risk: List or describe any signs or indicators that might suggest a high risk or a failure in the model's performance as related to this metric -4. Strengths: List the strengths or advantages of using this test in evaluating the model -5. Limitations: List the limitations or disadvantages of this test, including any potential bias or areas it might not fully address - -The following prompt was given to GPT4 to generate the descriptions: -''' -Ensure that each section is populated with succinct, clear, and relevant information pertaining to the test. -Respond with a markdown description where each section name is in header 3 format and then the content for that section. +Respond with a markdown description where each section name is in header 3 format and then the content for that section. Make sure to also remove the colon from the end of the header 3 section names and add a line break in between the section name and the section content. For sections 1-2, make sure the content is in full sentences and paragraph form. For sections 3-5, the content should be a list of bullet points unless the section has only one or two items, in which case it can be a paragraph. Respond only with the description and don't include any explanation or other text. Additionally, avoid using enclosing markdown syntax like ```markdown -''' - -Finally, the user submitted the code for the test. - -You will now review the descriptions and fix any of the following issues that may be present: -1. The description is missing a section -2. Sections 3-5 are not formatted as a list of bullet points -3. The description is missing a colon after the section name -4. 
The description is missing a blank line between sections -""".strip() - -summary_prompt = """ -You are an expert in validating Machine Learning models using MRM (Model Risk Management) best practices. -You are also an expert in writing descriptions that are pleasant to read while being very useful. -You will be provided the source code with a docstring that acts as the description for a test that is run against an ML model. -You will analyze the code and description docstring to determine the details and implementation of the test. -Finally, you will write a clear, descriptive and informative single sentence summary of the description which -can be a one liner about what the test does. - -This one-liner will be used as the first line of the test docstring (description). It should be no more than 120 characters ideally. -It does not have to be a complete sentence but should be grammatically correct. (think of it as a titular description for the test) -Respond with only the summary and don't include any explanation or other text """.strip() @@ -190,9 +99,7 @@ def is_test_function_signature(line, previous_line): """ Test functions should have a @tags or @tasks decorator call on top of them """ - return line.startswith("def") and ( - "@tags" in previous_line or "@tasks" in previous_line - ) + return line.startswith("def") and line.split("def ")[1].isupper() def get_description_lines(lines): @@ -206,9 +113,7 @@ def get_description_lines(lines): advance_to_next_line = False for i, line in enumerate(lines): - if advance_to_next_line or ( - line.startswith("class") or is_test_function_signature(line, lines[i - 1]) - ): + if advance_to_next_line or is_test_function_signature(line, lines[i - 1]): # ensure this is not a multi-line function signature like this: # # def test_function( @@ -313,106 +218,11 @@ def add_description_to_test(path): f.write("\n".join(lines)) -def add_summary_to_test(path): - click.echo(f"\n\n{path}:\n") - with open(path, "r") as f: - file_contents = f.read() - - response = openai.chat.completions.create( - model=OPENAI_GPT_MODEL, - messages=[ - {"role": "system", "content": summary_prompt}, - {"role": "user", "content": f"```python\n{file_contents}```"}, - ], - stream=True, - ) - summary = "" - for chunk in response: - if chunk.choices[0].finish_reason == "stop": - break - - click.echo(chunk.choices[0].delta.content, nl=False) - summary += chunk.choices[0].delta.content - - click.echo("\n") - - summary = indent_and_wrap(summary.strip().strip('"').strip("'")) - - insert_line_num = None - lines = file_contents.split("\n") - past_class_def = False - for i, line in enumerate(lines): - if line.startswith(' """') and past_class_def: - insert_line_num = i + 1 - break - elif line.startswith("class"): - past_class_def = True - - if insert_line_num is None: - raise ValueError("Could not find line number") - - # insert the new description lines - lines.insert(insert_line_num, f"{summary}\n") - - # write the updated file contents back to the file - with open(path, "w") as f: - f.write("\n".join(lines)) - - -def fix_test_description(path): - """You can switch to gpt3.5 if you don't have access but gpt4- should do a better job""" - # get file contents from path - click.echo(f"\n\n{path}:\n") - with open(path, "r") as f: - file_contents = f.read() - - response = openai.chat.completions.create( - model=OPENAI_GPT_MODEL, - messages=[ - {"role": "system", "content": fix_prompt}, - {"role": "user", "content": f"```python\n{file_contents}```"}, - ], - stream=True, - ) - description = "" - for 
chunk in response: - if chunk.choices[0].finish_reason == "stop": - break - - click.echo(chunk.choices[0].delta.content, nl=False) - description += chunk.choices[0].delta.content - - click.echo("\n") - - # format the description to go into the test code - # the description should be trimmed and have 4 spaces prepended to each line - # each line should be wrapped at 120 characters - description = indent_and_wrap(description.strip()) - lines = file_contents.split("\n") - - definition_line, existing_description_lines = get_description_lines(lines) - - if definition_line is None: - raise ValueError("Could not find class or function definition line") - - # remove any existing description lines - for i in reversed(existing_description_lines): - lines.pop(i) - - # insert the new description lines - lines.insert(definition_line + 1, f' """\n{description}\n """') - - # write the updated file contents back to the file - with open(path, "w") as f: - f.write("\n".join(lines)) - - def _is_test_file(path): return path.endswith(".py") and path.split("/")[-1][0].isupper() @click.command() -@click.argument("action", type=click.Choice(["add", "review", "summarize"])) @click.argument("path", type=click.Path(exists=True, file_okay=True, dir_okay=True)) def main(action, path): """Recursively processes the specified DIRECTORY and updates files needing metadata injection.""" @@ -431,17 +241,8 @@ def main(action, path): if _is_test_file(file): tests_to_process.append(os.path.join(root, file)) - if action == "add": - for file in tests_to_process: - add_description_to_test(file) - - elif action == "review": - for file in tests_to_process: - fix_test_description(file) - - elif action == "summarize": - for file in tests_to_process: - add_summary_to_test(file) + for file in tests_to_process: + add_description_to_test(file) if __name__ == "__main__": From 7f0d389cfc7702db128d2b6e63184eab80ca7c17 Mon Sep 17 00:00:00 2001 From: John Walz Date: Mon, 6 Jan 2025 12:45:07 -0500 Subject: [PATCH 02/13] feat: updating the script for bulk updating tests using AI --- scripts/add_test_description.py | 249 ------------------ scripts/bulk_ai_test_updates.py | 239 +++++++++++++++++ validmind/tests/__types__.py | 6 + .../data_validation/MutualInformation.py | 1 + .../data_validation/ScoreBandDefaultRates.py | 3 +- .../sklearn/CalibrationCurve.py | 5 +- .../ClassifierThresholdOptimization.py | 7 +- .../sklearn/HyperParametersTuning.py | 9 +- .../sklearn/ModelParameters.py | 1 + .../sklearn/ScoreProbabilityAlignment.py | 3 +- 10 files changed, 262 insertions(+), 261 deletions(-) delete mode 100644 scripts/add_test_description.py create mode 100644 scripts/bulk_ai_test_updates.py diff --git a/scripts/add_test_description.py b/scripts/add_test_description.py deleted file mode 100644 index 816782188..000000000 --- a/scripts/add_test_description.py +++ /dev/null @@ -1,249 +0,0 @@ -"""Script that generates a description for a test using GPT-4 and automatically inserts it into the class docstring - - -Usage: - poetry run python scripts/add_test_description.py - - - path: path to a test file or directory containing test files - - action: `add` or `review` - -Before running this, you need to either set an environment variable OPENAI_API_KEY -or create a .env file in the root of the project with the following contents: -OPENAI_API_KEY= -""" - -import os - -import click -import dotenv -import openai - -dotenv.load_dotenv() -openai.api_key = os.getenv("OPENAI_API_KEY") -OPENAI_GPT_MODEL = "gpt-4o" # or gpt-4-turbo or gpt-3.5-turbo etc - - 
-add_prompt = """ -You are an expert in validating Machine Learning models using MRM (Model Risk Management) best practices. -You are also an expert in writing descriptions that are pleasant to read while being very useful. -You will be provided the source code for a test that is run against an ML model. -You will analyze the code to determine the details and implementation of the test. -Finally, you will write clear, descriptive and informative descriptions in the format described -below that will document the tests for developers and risk management teams. - -For each test you will return a description with the following format and sections: - - - - -1. Purpose -2. Test Mechanism -3. Signs of High Risk -4. Strengths -5. Limitations - -Example description for a "Feature Drift" test: - -``` -Evaluates changes in feature distribution over time to identify potential model drift. - -**Purpose**: -The Feature Drift test aims to evaluate how much the distribution of features has shifted over time between two datasets, typically training and monitoring datasets. It uses the Population Stability Index (PSI) to quantify this change, providing insights into the model’s robustness and the necessity for retraining or feature engineering. - -**Test Mechanism**: -This test calculates the PSI by: - -- Bucketing the distributions of each feature in both datasets. -- Comparing the percentage of observations in each bucket between the two datasets. -- Aggregating the differences across all buckets for each feature to produce the PSI score for that feature. - -The PSI score is interpreted as: - -- PSI < 0.1: No significant population change. -- PSI < 0.2: Moderate population change. -- PSI >= 0.2: Significant population change. - -**Signs of High Risk**: -- PSI >= 0.2 for any feature, indicating a significant distribution shift. -- Consistently high PSI scores across multiple features. -- Sudden spikes in PSI in recent monitoring data compared to historical data. - -**Strengths**: -- Provides a quantitative measure of feature distribution changes. -- Easily interpretable thresholds for decision-making. -- Helps in early detection of data drift, prompting timely interventions. - -**Limitations**: -- May not capture more intricate changes in data distribution nuances. -- Assumes that bucket thresholds (quantiles) adequately represent distribution shifts. -- PSI score interpretation can be overly simplistic for complex datasets. -``` - -You will populate each section according to the following guidelines: -1. Purpose: Brief explanation of why this test is being used and what it is intended to evaluate or measure in relation to the model. -2. Test Mechanism: Describe the methodology used to test or apply the test, including any grading scales or thresholds -3. Signs of High Risk: List or describe any signs or indicators that might suggest a high risk or a failure in the model's performance as related to this metric -4. Strengths: List or describe the strengths or advantages of using this test in evaluating the model -5. Limitations: List or describe the limitations or disadvantages of this test, including any potential bias or areas it might not fully address - -Ensure that each section is populated with succinct, clear, and relevant information pertaining to the test. -Respond with a markdown description where each section name is in header 3 format and then the content for that section. 
-Make sure to also remove the colon from the end of the header 3 section names and add a line break in between the section name and the section content. -For sections 1-2, make sure the content is in full sentences and paragraph form. -For sections 3-5, the content should be a list of bullet points unless the section has only one or two items, in which case it can be a paragraph. -Respond only with the description and don't include any explanation or other text. Additionally, avoid using enclosing markdown syntax like ```markdown -""".strip() - - -def is_test_function_signature(line, previous_line): - """ - Test functions should have a @tags or @tasks decorator call on top of them - """ - return line.startswith("def") and line.split("def ")[1].isupper() - - -def get_description_lines(lines): - """ - Find the line number of the docstring that contains the description - """ - # insert the description into the test code - # the description should be inserted after the class definition line - class_definition_line = None - existing_description_lines = [] - - advance_to_next_line = False - for i, line in enumerate(lines): - if advance_to_next_line or is_test_function_signature(line, lines[i - 1]): - # ensure this is not a multi-line function signature like this: - # - # def test_function( - # arg1, - # arg2 - # ): - # - # we want to keep iterating until we find the closing parenthesis - if ")" not in line: - advance_to_next_line = True - continue - - class_definition_line = i - # check if there is already a doc string for the class - if '"""' in lines[i + 1]: - existing_description_lines.append(i + 1) - j = i + 2 - while j < len(lines): - existing_description_lines.append(j) - if '"""' in lines[j]: - break - j += 1 - - advance_to_next_line = False - break - - if class_definition_line is None: - raise ValueError("Could not find class or function definition line") - - return class_definition_line, existing_description_lines - - -def indent_and_wrap(text, indentation=4, wrap_length=120): - lines = text.split("\n") - result = [] - - for line in lines: - if line == "": - result.append("") - continue - - line = " " * indentation + line - - while len(line) > wrap_length: - space_index = line.rfind(" ", 0, wrap_length) - - if space_index == -1: - space_index = wrap_length - - result.append(line[:space_index]) - line = " " * indentation + line[space_index:].lstrip() - - result.append(line) - - return "\n".join(result) - - -def add_description_to_test(path): - """Generate a test description using gpt4 - You can switch to gpt3.5 if you don't have access but gpt4 should do a better job - """ - # get file contents from path - click.echo(f"\n\n{path}:\n") - with open(path, "r") as f: - file_contents = f.read() - - response = openai.chat.completions.create( - model=OPENAI_GPT_MODEL, - messages=[ - {"role": "system", "content": add_prompt}, - {"role": "user", "content": f"```python\n{file_contents}```"}, - ], - stream=True, - ) - description = "" - for chunk in response: - if chunk.choices[0].finish_reason == "stop": - break - - click.echo(chunk.choices[0].delta.content, nl=False) - description += chunk.choices[0].delta.content - - click.echo("\n") - - # format the description to go into the test code - # the description should be trimmed and have 4 spaces prepended to each line - # each line should be wrapped at 120 characters - description = indent_and_wrap(description.strip()) - lines = file_contents.split("\n") - - class_definition_line, existing_description_lines = get_description_lines(lines) - - # 
remove any existing description lines - for i in reversed(existing_description_lines): - lines.pop(i) - - # insert the new description lines - lines.insert(class_definition_line + 1, f' """\n{description}\n """') - - # write the updated file contents back to the file - with open(path, "w") as f: - f.write("\n".join(lines)) - - -def _is_test_file(path): - return path.endswith(".py") and path.split("/")[-1][0].isupper() - - -@click.command() -@click.argument("path", type=click.Path(exists=True, file_okay=True, dir_okay=True)) -def main(action, path): - """Recursively processes the specified DIRECTORY and updates files needing metadata injection.""" - tests_to_process = [] - - # check if path is a file or directory - if os.path.isfile(path): - if _is_test_file(path): - tests_to_process.append(path) - else: - raise ValueError(f"File {path} is not a test file") - - elif os.path.isdir(path): - for root, dirs, files in os.walk(path): - for file in files: - if _is_test_file(file): - tests_to_process.append(os.path.join(root, file)) - - for file in tests_to_process: - add_description_to_test(file) - - -if __name__ == "__main__": - main() diff --git a/scripts/bulk_ai_test_updates.py b/scripts/bulk_ai_test_updates.py new file mode 100644 index 000000000..d8e809bbd --- /dev/null +++ b/scripts/bulk_ai_test_updates.py @@ -0,0 +1,239 @@ +"""Script that runs bulk updates on test files using AI + + +Usage: + poetry run python scripts/bulk_ai_test_updates.py --action + + - path: path to a test file or directory containing test files + - action: `add_description` or `add_raw_data` + +Before running this, you need to either set an environment variable OPENAI_API_KEY +or create a .env file in the root of the project with the following contents: +OPENAI_API_KEY= +""" + +import os + +import click +import dotenv +from openai import OpenAI +from pydantic import BaseModel +from textwrap import indent, fill + +dotenv.load_dotenv() + +OPENAI_GPT_MODEL = "gpt-4o" # or gpt-4-turbo or gpt-3.5-turbo etc + +client = OpenAI() + + +class TestDescription(BaseModel): + summary: str + purpose: str + test_mechanism: str + signs_of_high_risk: list[str] + strengths: list[str] + limitations: list[str] + + def to_str(self): + def list_to_str(lst): + my_str = "" + for item in lst: + my_str += indent(fill(f" - {item}", width=116), " ") + my_str += "\n" + return my_str.strip("\n") + + # formatted to 120 chars wide and indented 4 spaces as its a function docstring + return f''' """ +{indent(fill(self.summary, width=116), " ")} + + **Purpose**: + +{indent(fill(self.purpose, width=116), " ")} + + **Test Mechanism**: + +{indent(fill(self.test_mechanism, width=116), " ")} + + **Signs of High Risk**: +{list_to_str(self.signs_of_high_risk)} + + **Strengths**: +{list_to_str(self.strengths)} + + **Limitations**: +{list_to_str(self.limitations)} + """''' + + +add_prompt = """ +You are an expert in validating Machine Learning models using MRM (Model Risk Management) best practices. +You are also an expert in writing descriptions that are pleasant to read while being very useful. +You will be provided the source code for a test that is run against an ML model. +You will analyze the code to determine the details and implementation of the test. +Finally, you will write clear, descriptive and informative descriptions in the format described below that will document the test. + +Ignore existing docstrings if you think they are incorrect or incomplete. The code itself should be the source of truth. 
+ +For each test you will write and return the following sections: + +1. Short single sentence summary of the test +2. Purpose +3. Test Mechanism +4. Signs of High Risk +5. Strengths +6. Limitations + +Example description for a "Feature Drift" test: + +``` +Evaluates changes in feature distribution over time to identify potential model drift. + +**Purpose**: + +The Feature Drift test aims to evaluate how much the distribution of features has shifted over time between two datasets, typically training and monitoring datasets. It uses the Population Stability Index (PSI) to quantify this change, providing insights into the model’s robustness and the necessity for retraining or feature engineering. + +**Test Mechanism**: + +This test calculates the PSI by: +- Bucketing the distributions of each feature in both datasets. +- Comparing the percentage of observations in each bucket between the two datasets. +- Aggregating the differences across all buckets for each feature to produce the PSI score for that feature. + +The PSI score is interpreted as: +- PSI < 0.1: No significant population change. +- PSI < 0.2: Moderate population change. +- PSI >= 0.2: Significant population change. + +**Signs of High Risk**: +- PSI >= 0.2 for any feature, indicating a significant distribution shift. +- Consistently high PSI scores across multiple features. +- Sudden spikes in PSI in recent monitoring data compared to historical data. + +**Strengths**: +- Provides a quantitative measure of feature distribution changes. +- Easily interpretable thresholds for decision-making. +- Helps in early detection of data drift, prompting timely interventions. + +**Limitations**: +- May not capture more intricate changes in data distribution nuances. +- Assumes that bucket thresholds (quantiles) adequately represent distribution shifts. +- PSI score interpretation can be overly simplistic for complex datasets. +``` + +You will populate each section according to the following guidelines: +1. Summary: A single sentence summary of the test that is easy to digest for both technical and non-technical audiences. +2. Purpose: Brief explanation of why this test is being used and what it is intended to evaluate or measure in relation to the model. +3. Test Mechanism: Describe the methodology used to test or apply the test, including any grading scales or thresholds +4. Signs of High Risk: Short list of the signs or indicators that might suggest a high risk or a failure in the model's performance as related to this metric +5. Strengths: Short list of the strengths or advantages of using this test in evaluating the model +6. Limitations: Short list of the limitations or disadvantages of this test, including any potential bias or areas it might not fully address + +Ensure that each section is populated with succinct, clear, and relevant information pertaining to the test. +For sections 1-3, make sure the content is in full sentences and paragraph form. +For sections 4-6, the content should be a list of bullet points returned as a list of strings. Keep the list short and concise and only include the most important points. 
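+
+For reference, the PSI calculation walked through in the example above can be sketched in a few lines of Python. This is a minimal illustration only — the quantile-based bucket edges taken from the reference (training) sample and the small `eps` guard against empty buckets are assumptions, not part of any particular SDK:
+
+```python
+import numpy as np
+
+
+def psi(reference: np.ndarray, monitoring: np.ndarray, buckets: int = 10, eps: float = 1e-6) -> float:
+    # Population Stability Index between a reference sample and a monitoring sample.
+    # Bucket edges come from the reference distribution (quantiles), deduplicated to stay monotonic.
+    edges = np.unique(np.quantile(reference, np.linspace(0, 1, buckets + 1)))
+
+    # Share of observations per bucket in each dataset; clipping keeps monitoring
+    # values inside the reference range so every observation lands in a bucket.
+    ref_pct = np.histogram(reference, bins=edges)[0] / len(reference) + eps
+    mon_pct = np.histogram(np.clip(monitoring, edges[0], edges[-1]), bins=edges)[0] / len(monitoring) + eps
+
+    # Aggregate the per-bucket differences into the PSI score.
+    return float(np.sum((ref_pct - mon_pct) * np.log(ref_pct / mon_pct)))
+```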
+""".strip() + + +def add_description_to_test(path): + """Generate a test description using gpt4 + You can switch to gpt3.5 if you don't have access but gpt4 should do a better job + """ + # get file contents from path + click.echo(f"> {path}") + with open(path, "r") as f: + file_contents = f.read() + + test_name = path.split("/")[-1].split(".")[0] + + response = client.beta.chat.completions.parse( + model=OPENAI_GPT_MODEL, + messages=[ + {"role": "system", "content": add_prompt}, + {"role": "user", "content": f"```python\n{file_contents}```"}, + ], + response_format=TestDescription, + ) + description = response.choices[0].message.parsed + + lines = file_contents.split("\n") + + # find the test function definition + test_def_start = 0 + test_def_end = 0 + # find start of test function definition + for i, line in enumerate(lines): + if "def" in line and test_name in line: + test_def_start = i + break + # handle multiline test function definitions + for i, line in enumerate(lines[test_def_start:]): + if "):" in line: + test_def_end = i + test_def_start + 1 + break + # handle existing docstrings + if '"""' in lines[test_def_end]: + lines_to_remove = [test_def_end] + for i, line in enumerate(lines[test_def_end + 1 :]): + lines_to_remove.append(test_def_end + i + 1) + if '"""' in line: + break + for i in reversed(lines_to_remove): + lines.pop(i) + # insert the new description lines + lines.insert(test_def_end, description.to_str()) + + with open(path, "w") as f: + f.write("\n".join(lines)) + + +def add_raw_data_to_test(path): + """Add raw data to a test file""" + pass + + +def _is_test_file(path): + return path.endswith(".py") and path.split("/")[-1][0].isupper() + + +@click.command() +@click.argument( + "path", + type=click.Path(exists=True, file_okay=True, dir_okay=True), + required=True, +) +@click.option( + "--action", + type=click.Choice(["add_description", "add_raw_data"]), + required=True, +) +def main(action, path): + """Recursively processes the specified DIRECTORY and updates files needing metadata injection.""" + tests_to_process = [] + + # check if path is a file or directory + if os.path.isfile(path): + if _is_test_file(path): + tests_to_process.append(path) + else: + raise ValueError(f"File {path} is not a test file") + + elif os.path.isdir(path): + for root, dirs, files in os.walk(path): + for file in files: + if _is_test_file(file): + tests_to_process.append(os.path.join(root, file)) + + if action == "add_description": + func = add_description_to_test + elif action == "add_raw_data": + func = add_raw_data_to_test + else: + raise ValueError(f"Invalid action: {action}") + + for file in tests_to_process: + func(file) + + +if __name__ == "__main__": + main() diff --git a/validmind/tests/__types__.py b/validmind/tests/__types__.py index 0c637afb7..ce0d1cd88 100644 --- a/validmind/tests/__types__.py +++ b/validmind/tests/__types__.py @@ -39,6 +39,7 @@ "validmind.data_validation.LaggedCorrelationHeatmap", "validmind.data_validation.MissingValues", "validmind.data_validation.MissingValuesBarPlot", + "validmind.data_validation.MutualInformation", "validmind.data_validation.PearsonCorrelationMatrix", "validmind.data_validation.PhillipsPerronArch", "validmind.data_validation.ProtectedClassesCombination", @@ -48,6 +49,7 @@ "validmind.data_validation.RollingStatsPlot", "validmind.data_validation.RunsTest", "validmind.data_validation.ScatterPlot", + "validmind.data_validation.ScoreBandDefaultRates", "validmind.data_validation.SeasonalDecompose", "validmind.data_validation.ShapiroWilk", 
"validmind.data_validation.Skewness", @@ -121,7 +123,9 @@ "validmind.model_validation.ragas.SemanticSimilarity", "validmind.model_validation.sklearn.AdjustedMutualInformation", "validmind.model_validation.sklearn.AdjustedRandIndex", + "validmind.model_validation.sklearn.CalibrationCurve", "validmind.model_validation.sklearn.ClassifierPerformance", + "validmind.model_validation.sklearn.ClassifierThresholdOptimization", "validmind.model_validation.sklearn.ClusterCosineSimilarity", "validmind.model_validation.sklearn.ClusterPerformanceMetrics", "validmind.model_validation.sklearn.CompletenessScore", @@ -134,6 +138,7 @@ "validmind.model_validation.sklearn.MinimumAccuracy", "validmind.model_validation.sklearn.MinimumF1Score", "validmind.model_validation.sklearn.MinimumROCAUCScore", + "validmind.model_validation.sklearn.ModelParameters", "validmind.model_validation.sklearn.ModelsPerformanceComparison", "validmind.model_validation.sklearn.OverfitDiagnosis", "validmind.model_validation.sklearn.PermutationFeatureImportance", @@ -147,6 +152,7 @@ "validmind.model_validation.sklearn.RegressionR2SquareComparison", "validmind.model_validation.sklearn.RobustnessDiagnosis", "validmind.model_validation.sklearn.SHAPGlobalImportance", + "validmind.model_validation.sklearn.ScoreProbabilityAlignment", "validmind.model_validation.sklearn.SilhouettePlot", "validmind.model_validation.sklearn.TrainingTestDegradation", "validmind.model_validation.sklearn.VMeasure", diff --git a/validmind/tests/data_validation/MutualInformation.py b/validmind/tests/data_validation/MutualInformation.py index 6358a5c1b..0f6cfbeb7 100644 --- a/validmind/tests/data_validation/MutualInformation.py +++ b/validmind/tests/data_validation/MutualInformation.py @@ -4,6 +4,7 @@ import plotly.graph_objects as go from sklearn.feature_selection import mutual_info_classif, mutual_info_regression + from validmind import tags, tasks from validmind.vm_models import VMDataset from validmind.vm_models.result import RawData diff --git a/validmind/tests/data_validation/ScoreBandDefaultRates.py b/validmind/tests/data_validation/ScoreBandDefaultRates.py index 567c07279..1f96b12a6 100644 --- a/validmind/tests/data_validation/ScoreBandDefaultRates.py +++ b/validmind/tests/data_validation/ScoreBandDefaultRates.py @@ -2,8 +2,9 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -import pandas as pd import numpy as np +import pandas as pd + from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel diff --git a/validmind/tests/model_validation/sklearn/CalibrationCurve.py b/validmind/tests/model_validation/sklearn/CalibrationCurve.py index 71739bbb8..0792f62a5 100644 --- a/validmind/tests/model_validation/sklearn/CalibrationCurve.py +++ b/validmind/tests/model_validation/sklearn/CalibrationCurve.py @@ -2,10 +2,11 @@ # See the LICENSE file in the root of this repository for details. 
# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from sklearn.calibration import calibration_curve import plotly.graph_objects as go +from sklearn.calibration import calibration_curve + from validmind import tags, tasks -from validmind.vm_models import VMModel, VMDataset +from validmind.vm_models import VMDataset, VMModel from validmind.vm_models.result import RawData diff --git a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py index 1cdc62998..0a4d4f442 100644 --- a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py +++ b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py @@ -6,11 +6,8 @@ import pandas as pd import plotly.graph_objects as go from plotly.subplots import make_subplots -from sklearn.metrics import ( - roc_curve, - precision_recall_curve, - confusion_matrix, -) +from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve + from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel diff --git a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py index 1e9e30acf..dd90a44af 100644 --- a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py +++ b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py @@ -2,9 +2,10 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from typing import Union, Dict, List -from sklearn.model_selection import GridSearchCV +from typing import Dict, List, Union + from sklearn.metrics import make_scorer, recall_score +from sklearn.model_selection import GridSearchCV from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -24,7 +25,9 @@ def _get_metrics(scoring): return ( scoring if isinstance(scoring, list) - else list(scoring.keys()) if isinstance(scoring, dict) else [scoring] + else list(scoring.keys()) + if isinstance(scoring, dict) + else [scoring] ) diff --git a/validmind/tests/model_validation/sklearn/ModelParameters.py b/validmind/tests/model_validation/sklearn/ModelParameters.py index 62983d134..f24d83dd5 100644 --- a/validmind/tests/model_validation/sklearn/ModelParameters.py +++ b/validmind/tests/model_validation/sklearn/ModelParameters.py @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial import pandas as pd + from validmind import tags, tasks diff --git a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py index dadb3e974..7246ca573 100644 --- a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py +++ b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py @@ -4,8 +4,9 @@ import pandas as pd import plotly.graph_objects as go + from validmind import tags, tasks -from validmind.vm_models import VMModel, VMDataset +from validmind.vm_models import VMDataset, VMModel @tags("visualization", "credit_risk", "calibration") From bd17823c19e7602ff74cd6b830e37f9e62ea20d0 Mon Sep 17 00:00:00 2001 From: John Walz Date: Mon, 6 Jan 2025 20:45:44 -0500 Subject: [PATCH 03/13] feat: adding new action to add raw data to tests --- scripts/bulk_ai_test_updates.py | 100 +++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 2 deletions(-) diff --git a/scripts/bulk_ai_test_updates.py 
b/scripts/bulk_ai_test_updates.py index d8e809bbd..0fce7a38c 100644 --- a/scripts/bulk_ai_test_updates.py +++ b/scripts/bulk_ai_test_updates.py @@ -13,6 +13,7 @@ """ import os +import subprocess import click import dotenv @@ -135,6 +136,78 @@ def list_to_str(lst): """.strip() +raw_data_prompt = """ +You are an expert Python engineer and data scientist with broad experience across many domains. +ValidMind is a company that provides a Python SDK for building and running tests for the purposes of model risk management. +You will be provided with the source code for a "test" that is run against an ML model or dataset. +You will analyze the code to determine the details and implementation of the test. +Then you will use the below example to implement changes to the test to make it use the new raw data mechanism offered by the ValidMind SDK. + +Example test without raw data: + +``` +... # existing code, imports, etc. +from validmind import tags, tasks +... + +def ExampleConfusionMatrix(model: VMModel, dataset: VMDataset): + y_pred = dataset.y_pred(model) + y_true = dataset.y.astype(y_pred.dtype) + + labels = np.unique(y_true) + labels = sorted(labels.tolist()) + + cm = confusion_matrix(y_true, y_pred, labels=labels) + + fig = ff.create_annotated_heatmap() + .. + + return fig +``` + +Example test with raw data: + +``` +... # existing code, imports, etc. +from validmind import tags, tasks, RawData +... + + +def ExampleConfusionMatrix(model: VMModel, dataset: VMDataset): + + y_pred = dataset.y_pred(model) + y_true = dataset.y.astype(y_pred.dtype) + + labels = np.unique(y_true) + labels = sorted(labels.tolist()) + + cm = confusion_matrix(y_true, y_pred, labels=labels) + + fig = ff.create_annotated_heatmap() + .. + + return fig, RawData(confusion_matrix=cm) +``` + +Notice that the test now returns a tuple of the figure and the raw data. +Tests can return any number of objects (plots, tables, values, etc.) as part of a tuple or a single object. +The new RawData object can be used to store any number of intermediate data objects that are used to generate the final output. +The goal is to store these for later post-processing functions that may want to re-generate the final output in a different format. +The RawData object is simply a class that can store any number of any type of objects using a key-value like interface where the key in the constructor is the name of the object and the value is the object itself. +Also notice the import of the RawData object. + +You will return the updated test code (make sure to include all the existing imports, copyrights, comments, etc.). +Return only the updated code and nothing else. +Do not wrap the code in backticks, simply return valid Python code. +If the test already uses the RawData object, simply return the original code without any changes and without backticks. + +Prefer dataframes over dictionaries or numpy arrays but don't force it if the test only uses dictionaries or some other format. +Be intentional about the name of the key in the RawData object, it should be a short, descriptive name that is easy for developers to understand and use. +Do not use vague names like "data", "results", "output", etc. Use something specific to the test and descriptive of the data being stored. +Ideally, the raw data should end up containing anything needed to re-generate the final output (assuming that the original inputs and parameters are available). 
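+
+As additional context, the post-processing that raw data enables might look roughly like the sketch below. This is purely illustrative — the hook name `post_process_fn` and the `result.raw_data.confusion_matrix` access pattern are assumptions for the example, not confirmed SDK API:
+
+```python
+import plotly.figure_factory as ff
+
+
+def post_process_fn(result):
+    # Hypothetical post-processing step: rebuild the confusion-matrix figure from the
+    # intermediate array stored in RawData, e.g. to restyle or re-label it later.
+    cm = result.raw_data.confusion_matrix  # assumed access pattern
+    fig = ff.create_annotated_heatmap(z=cm, colorscale="Blues")
+    fig.update_layout(title_text="Confusion Matrix (rebuilt from raw data)")
+    return fig
+```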
+""" + + def add_description_to_test(path): """Generate a test description using gpt4 You can switch to gpt3.5 if you don't have access but gpt4 should do a better job @@ -189,7 +262,27 @@ def add_description_to_test(path): def add_raw_data_to_test(path): """Add raw data to a test file""" - pass + # get file contents from path + click.echo(f"> {path}") + with open(path, "r") as f: + file_contents = f.read() + + response = client.chat.completions.create( + model=OPENAI_GPT_MODEL, + messages=[ + {"role": "system", "content": raw_data_prompt}, + {"role": "user", "content": f"```python\n{file_contents}```"}, + ], + ) + + updated_file_contents = response.choices[0].message.content + # remove starting "```python" and ending "```" + updated_file_contents = ( + updated_file_contents.lstrip("```python").rstrip("```").strip() + ) + + with open(path, "w") as f: + f.write(updated_file_contents) def _is_test_file(path): @@ -219,7 +312,7 @@ def main(action, path): raise ValueError(f"File {path} is not a test file") elif os.path.isdir(path): - for root, dirs, files in os.walk(path): + for root, _, files in os.walk(path): for file in files: if _is_test_file(file): tests_to_process.append(os.path.join(root, file)) @@ -234,6 +327,9 @@ def main(action, path): for file in tests_to_process: func(file) + # run black on the tests directory + subprocess.run(["poetry", "run", "black", "validmind/tests"]) + if __name__ == "__main__": main() From 7a32ee64b7cb9067f06b3b5ef5c1626a9edc6c26 Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 14:23:05 -0500 Subject: [PATCH 04/13] feat: adding raw data across all tests --- scripts/bulk_ai_test_updates.py | 193 +++++++++++++++++- .../tests/data_validation/ACFandPACFPlot.py | 8 +- validmind/tests/data_validation/AutoMA.py | 4 +- .../data_validation/BivariateScatterPlots.py | 6 +- validmind/tests/data_validation/BoxPierce.py | 4 +- .../tests/data_validation/ClassImbalance.py | 3 +- .../data_validation/DatasetDescription.py | 13 +- .../tests/data_validation/DatasetSplit.py | 4 +- .../tests/data_validation/DickeyFullerGLS.py | 4 +- .../FeatureTargetCorrelationPlot.py | 10 +- .../tests/data_validation/HighCardinality.py | 11 +- .../data_validation/HighPearsonCorrelation.py | 8 +- .../data_validation/IQROutliersBarPlot.py | 11 +- .../IsolationForestOutliers.py | 4 +- validmind/tests/data_validation/JarqueBera.py | 4 +- validmind/tests/data_validation/KPSS.py | 6 +- validmind/tests/data_validation/LJungBox.py | 6 +- .../LaggedCorrelationHeatmap.py | 4 +- .../data_validation/MissingValuesBarPlot.py | 21 +- .../data_validation/MutualInformation.py | 7 +- .../PearsonCorrelationMatrix.py | 4 +- .../ProtectedClassesCombination.py | 7 +- .../ProtectedClassesDescription.py | 4 +- .../ProtectedClassesDisparity.py | 9 +- .../ProtectedClassesThresholdOptimizer.py | 3 +- .../tests/data_validation/RollingStatsPlot.py | 31 ++- validmind/tests/data_validation/RunsTest.py | 4 +- .../tests/data_validation/ScatterPlot.py | 10 +- .../data_validation/SeasonalDecompose.py | 14 +- .../tests/data_validation/ShapiroWilk.py | 4 +- validmind/tests/data_validation/Skewness.py | 12 +- validmind/tests/data_validation/SpreadPlot.py | 11 +- .../TabularCategoricalBarPlots.py | 6 +- .../TabularDateTimeHistograms.py | 4 +- .../TabularNumericalHistograms.py | 2 +- .../data_validation/TargetRateBarPlots.py | 7 +- .../data_validation/TimeSeriesFrequency.py | 9 +- .../data_validation/TimeSeriesHistogram.py | 6 +- .../data_validation/TimeSeriesLinePlot.py | 4 +- .../TimeSeriesMissingValues.py | 24 ++- 
.../data_validation/TimeSeriesOutliers.py | 3 +- .../tests/data_validation/WOEBinPlots.py | 4 +- .../tests/data_validation/WOEBinTable.py | 20 +- .../tests/data_validation/ZivotAndrewsArch.py | 15 +- .../tests/data_validation/nlp/CommonWords.py | 4 +- .../tests/data_validation/nlp/Hashtags.py | 4 +- .../data_validation/nlp/LanguageDetection.py | 15 +- .../tests/data_validation/nlp/Mentions.py | 15 +- .../nlp/PolarityAndSubjectivity.py | 4 +- .../tests/data_validation/nlp/Punctuations.py | 5 +- .../tests/data_validation/nlp/Sentiment.py | 4 +- .../tests/data_validation/nlp/StopWords.py | 3 +- .../data_validation/nlp/TextDescription.py | 4 +- .../tests/data_validation/nlp/Toxicity.py | 4 +- validmind/tests/model_validation/BertScore.py | 4 +- validmind/tests/model_validation/BleuScore.py | 4 +- .../ClusterSizeDistribution.py | 4 +- .../model_validation/ContextualRecall.py | 4 +- .../tests/model_validation/FeaturesAUC.py | 4 +- .../tests/model_validation/MeteorScore.py | 4 +- .../ModelPredictionResiduals.py | 4 +- .../tests/model_validation/RegardScore.py | 8 +- .../RegressionResidualsPlot.py | 7 +- .../tests/model_validation/RougeScore.py | 7 +- .../TimeSeriesPredictionWithCI.py | 13 +- .../TimeSeriesPredictionsPlot.py | 6 +- .../TimeSeriesR2SquareBySegments.py | 4 +- .../tests/model_validation/TokenDisparity.py | 4 +- .../tests/model_validation/ToxicityScore.py | 12 +- .../embeddings/ClusterDistribution.py | 12 +- .../embeddings/CosineSimilarityComparison.py | 18 +- .../CosineSimilarityDistribution.py | 8 +- .../embeddings/CosineSimilarityHeatmap.py | 4 +- .../embeddings/DescriptiveAnalytics.py | 18 +- .../embeddings/EmbeddingsVisualization2D.py | 4 +- .../embeddings/EuclideanDistanceComparison.py | 18 +- .../embeddings/EuclideanDistanceHeatmap.py | 4 +- .../embeddings/PCAComponentsPairwisePlots.py | 9 +- .../embeddings/StabilityAnalysisKeyword.py | 6 +- .../StabilityAnalysisRandomNoise.py | 6 +- .../embeddings/StabilityAnalysisSynonyms.py | 4 +- .../StabilityAnalysisTranslation.py | 6 +- .../embeddings/TSNEComponentsPairwisePlots.py | 14 +- .../ragas/AnswerCorrectness.py | 3 +- .../model_validation/ragas/AspectCritic.py | 18 +- .../ragas/ContextEntityRecall.py | 3 +- .../ragas/ContextPrecision.py | 3 +- .../ragas/ContextPrecisionWithoutReference.py | 3 +- .../model_validation/ragas/ContextRecall.py | 3 +- .../model_validation/ragas/Faithfulness.py | 3 +- .../ragas/NoiseSensitivity.py | 3 +- .../ragas/ResponseRelevancy.py | 3 +- .../ragas/SemanticSimilarity.py | 3 +- .../sklearn/AdjustedMutualInformation.py | 15 +- .../sklearn/AdjustedRandIndex.py | 16 +- .../ClassifierThresholdOptimization.py | 15 +- .../sklearn/ClusterCosineSimilarity.py | 7 +- .../sklearn/ClusterPerformanceMetrics.py | 31 +-- .../sklearn/CompletenessScore.py | 21 +- .../sklearn/ConfusionMatrix.py | 4 +- .../sklearn/FeatureImportance.py | 5 +- .../sklearn/FowlkesMallowsScore.py | 16 +- .../sklearn/HomogeneityScore.py | 20 +- .../sklearn/HyperParametersTuning.py | 10 +- .../sklearn/KMeansClustersOptimization.py | 4 +- .../sklearn/MinimumROCAUCScore.py | 18 +- .../sklearn/OverfitDiagnosis.py | 12 +- .../sklearn/PermutationFeatureImportance.py | 4 +- .../sklearn/PopulationStabilityIndex.py | 36 ++-- .../sklearn/PrecisionRecallCurve.py | 6 +- .../model_validation/sklearn/ROCCurve.py | 2 +- .../sklearn/RegressionErrors.py | 4 +- .../sklearn/RegressionPerformance.py | 4 +- .../sklearn/RegressionR2SquareComparison.py | 4 +- .../sklearn/SHAPGlobalImportance.py | 3 +- .../sklearn/ScoreProbabilityAlignment.py | 4 +- 
.../sklearn/SilhouettePlot.py | 8 +- .../sklearn/TrainingTestDegradation.py | 10 +- .../model_validation/sklearn/VMeasure.py | 19 +- .../sklearn/WeakspotsDiagnosis.py | 5 + .../CumulativePredictionProbabilities.py | 4 +- .../statsmodels/DurbinWatsonTest.py | 4 +- .../model_validation/statsmodels/GINITable.py | 5 +- .../statsmodels/KolmogorovSmirnov.py | 4 +- .../statsmodels/Lilliefors.py | 4 +- .../PredictionProbabilitiesHistogram.py | 4 +- .../statsmodels/RegressionCoeffs.py | 4 +- .../RegressionFeatureSignificance.py | 6 +- .../RegressionModelForecastPlot.py | 12 +- .../RegressionModelForecastPlotLevels.py | 7 +- .../RegressionModelSensitivityPlot.py | 4 +- .../statsmodels/RegressionModelSummary.py | 6 +- .../RegressionPermutationFeatureImportance.py | 4 +- .../statsmodels/ScorecardHistogram.py | 5 +- .../tests/ongoing_monitoring/FeatureDrift.py | 8 +- .../PredictionAcrossEachFeature.py | 8 +- .../PredictionCorrelation.py | 9 +- .../TargetPredictionDistributionPlot.py | 6 +- validmind/tests/prompt_validation/Bias.py | 10 +- validmind/tests/prompt_validation/Clarity.py | 8 +- .../tests/prompt_validation/Conciseness.py | 22 +- .../tests/prompt_validation/Delimitation.py | 22 +- .../prompt_validation/NegativeInstruction.py | 4 +- .../tests/prompt_validation/Robustness.py | 8 +- .../tests/prompt_validation/Specificity.py | 22 +- 145 files changed, 936 insertions(+), 442 deletions(-) diff --git a/scripts/bulk_ai_test_updates.py b/scripts/bulk_ai_test_updates.py index 0fce7a38c..33506a1b6 100644 --- a/scripts/bulk_ai_test_updates.py +++ b/scripts/bulk_ai_test_updates.py @@ -14,6 +14,7 @@ import os import subprocess +import textwrap import click import dotenv @@ -27,6 +28,31 @@ client = OpenAI() +USER_PROMPT = ( + None # can be hardcoded instead of it being prompted from the command line +) +# USER_PROMPT = """ +# Can you change any tests that add a bunch of figures to a list called `returns` and then add a raw data or other stuff to the list at the end before converting it to a tuple in the return. +# These instances would be cleaner to read if instead a `figures` list was used and this list was unpacked into the return tuple like this: +# ``` +# returns = [] +# ... +# returns.append(RawData(some_key=some_value)) +# return tuple(returns) +# ``` + +# to this: + +# ``` +# figures = [] +# ... +# return (*figures, RawData(some_key=some_value)) +# ``` + +# If the test doesn't have any figures or only has one figure,or the figures are not stored in a list called `returns`, then just return `NO CHANGE`. +# You are looking for the exact pattern described above. Avoid unnecessary changes. +# """ + class TestDescription(BaseModel): summary: str @@ -139,6 +165,22 @@ def list_to_str(lst): raw_data_prompt = """ You are an expert Python engineer and data scientist with broad experience across many domains. ValidMind is a company that provides a Python SDK for building and running tests for the purposes of model risk management. +ValidMind's SDK offers a library of "test" functions that are run with our test harness against many types of models and datasets. +These test functions return either a single object or a tuple of objects. +These objects are turned into a test result report by the test harness. +They can return any number of the following types of objects: +- Tables (dataframes or lists of dictionaries) +- Figures (matplotlib or plotly figures) +- Values (scalars, vectors, etc.) 
+- Pass/Fail (a boolean value that indicates whether the test passed or failed) +- Raw Data (intermediate data helpful for re-generating any of the above objects when post-processing the test result) + +Tests can return either a single object from the above list or a tuple of these objects in any order. + +The raw data is a new feature that allows tests to return intermediate data that is not appropriate to show in the test result but is helpful if the user adds a post-processing function to modify the test result. +Its a class that can be initialized with any number of any type of objects using a key-value like interface where the key in the constructor is the name of the object and the value is the object itself. +It should only be used to store data that is not already returned as part of the test result (i.e. in a table) but could be useful to re-generate any of the test result objects (tables, figures). + You will be provided with the source code for a "test" that is run against an ML model or dataset. You will analyze the code to determine the details and implementation of the test. Then you will use the below example to implement changes to the test to make it use the new raw data mechanism offered by the ValidMind SDK. @@ -190,10 +232,6 @@ def ExampleConfusionMatrix(model: VMModel, dataset: VMDataset): ``` Notice that the test now returns a tuple of the figure and the raw data. -Tests can return any number of objects (plots, tables, values, etc.) as part of a tuple or a single object. -The new RawData object can be used to store any number of intermediate data objects that are used to generate the final output. -The goal is to store these for later post-processing functions that may want to re-generate the final output in a different format. -The RawData object is simply a class that can store any number of any type of objects using a key-value like interface where the key in the constructor is the name of the object and the value is the object itself. Also notice the import of the RawData object. You will return the updated test code (make sure to include all the existing imports, copyrights, comments, etc.). @@ -201,10 +239,76 @@ def ExampleConfusionMatrix(model: VMModel, dataset: VMDataset): Do not wrap the code in backticks, simply return valid Python code. If the test already uses the RawData object, simply return the original code without any changes and without backticks. -Prefer dataframes over dictionaries or numpy arrays but don't force it if the test only uses dictionaries or some other format. +Prefer dataframes over dictionaries or numpy arrays when adding raw data but don't force it if the test only uses dictionaries or some other format. Be intentional about the name of the key in the RawData object, it should be a short, descriptive name that is easy for developers to understand and use. Do not use vague names like "data", "results", "output", etc. Use something specific to the test and descriptive of the data being stored. Ideally, the raw data should end up containing anything needed to re-generate the final output (assuming that the original inputs and parameters are available). + +If the test doesn't really have anything that should be stored as raw data, just return the original code and nothing else. +If the test already returns one or more tables that include all the data that you would add to the raw data, then don't add raw data. +The user will prefer the tables over the raw data since they are easier to understand and use. 
+The raw data is most impactful when it contains data used to produce one or more figures or when it contains intermediate data that is used to produce aggregated or summary tables. + +Some notes: +- ValidMind tests should return a single tuple. +- Multiple figures, tables, etc. can be returned as part of a single top level return tuple. +- The only exception is when the test returns multiple tables with titles. These are returned as a dictionary where the keys are the table titles and the values are the tables objects (dataframes or lists of dictionaries). +- If the test uses a list to collect multiple figures, etc. and then converts that list to a tuple when returning, you should add the raw data to the end of the list before it is converted to a tuple. + - In this case, rename the list if its "figures" or something similar to avoid confusion (a good name would be "returns") + +DO NOT CHANGE ANYTHING OTHER THAN ADDING THE NEW RAW DATA MECHANISM... I.E. DO NOT REMOVE ANYTHING FROM THE RETURN TUPLE OR THE RETURN VALUE (if it is a single object) +""" + +custom_prompt_system = """ +You are an expert Python engineer and data scientist with broad experience across many domains. +ValidMind is a company that provides a Python SDK for building and running tests for the purposes of model risk management. +ValidMind's SDK offers a library of "test" functions that are run with our test harness against many types of models and datasets. +These test functions return either a single object or a tuple of objects. +These objects are turned into a test result report by the test harness. +They can return any number of the following types of objects: +- Tables (dataframes or lists of dictionaries) +- Figures (matplotlib or plotly figures) +- Values (scalars, vectors, etc.) +- Pass/Fail (a boolean value that indicates whether the test passed or failed) +- Raw Data (intermediate data helpful for re-generating any of the above objects when post-processing the test result) + +Tests can return either a single object from the above list or a tuple of these objects in any order. + +You will be provided with custom instructions from the user on how to modify one or more tests. + +You will then be provided with the source code for a test (one by one). +You will analyze the code carefully and then generate an updated version of the code to meet the user's instructions. +You will then return the updated test code (make sure to include all the existing imports, copyrights, comments, etc.). + +Return only the updated code and nothing else. +Do not wrap the code in backticks, simply return valid Python code. + +The only execption is if the test doesn't need to be modified. +In this case, return the following string exactly: `NO CHANGE` +""" + +review_prompt_system = """ +You are an expert Python engineer and data scientist with broad experience across many domains. +ValidMind is a company that provides a Python SDK for building and running tests for the purposes of model risk management. +ValidMind's SDK offers a library of "test" functions that are run with our test harness against many types of models and datasets. +These test functions return either a single object or a tuple of objects. +These objects are turned into a test result report by the test harness. +They can return any number of the following types of objects: +- Tables (dataframes or lists of dictionaries) +- Figures (matplotlib or plotly figures) +- Values (scalars, vectors, etc.) 
+- Pass/Fail (a boolean value that indicates whether the test passed or failed) +- Raw Data (intermediate data helpful for re-generating any of the above objects when post-processing the test result) + +Tests can return either a single object from the above list or a tuple of these objects in any order. + +You will be provided with custom instructions from the user on how to review one or more tests. + +You will then be provided with the source code for a test (one by one). +You will analyze the code to determine the details and implementation of the test. +You will follow the user's instructions to review and then provide feedback on the test. + +If the test does not need any feedback, simply return the following string exactly: `NO FEEDBACK NEEDED` """ @@ -285,6 +389,60 @@ def add_raw_data_to_test(path): f.write(updated_file_contents) +def custom_prompt(path, user_prompt): + """Custom prompt for a test file""" + # get file contents from path + click.echo(f"> {path}") + with open(path, "r") as f: + file_contents = f.read() + + response = client.chat.completions.create( + model=OPENAI_GPT_MODEL, + messages=[ + {"role": "system", "content": custom_prompt_system}, + {"role": "user", "content": user_prompt}, + {"role": "user", "content": f"```python\n{file_contents}```"}, + ], + ) + + if "NO CHANGE" in response.choices[0].message.content: + click.echo("No changes needed") + return + + updated_file_contents = response.choices[0].message.content + # remove starting "```python" and ending "```" + updated_file_contents = ( + updated_file_contents.lstrip("```python").rstrip("```").strip() + ) + + with open(path, "w") as f: + f.write(updated_file_contents) + + +def custom_review(path, user_prompt): + """Custom review for a test file""" + # get file contents from path + click.echo(f"\n> {path}") + with open(path, "r") as f: + file_contents = f.read() + + response = client.chat.completions.create( + model=OPENAI_GPT_MODEL, + messages=[ + {"role": "system", "content": review_prompt_system}, + {"role": "user", "content": user_prompt}, + {"role": "user", "content": f"```python\n{file_contents}```"}, + ], + ) + + if "NO FEEDBACK NEEDED" in response.choices[0].message.content: + click.echo("No feedback needed") + return + + feedback = response.choices[0].message.content + click.echo(textwrap.indent(feedback, " ")) + + def _is_test_file(path): return path.endswith(".py") and path.split("/")[-1][0].isupper() @@ -297,11 +455,21 @@ def _is_test_file(path): ) @click.option( "--action", - type=click.Choice(["add_description", "add_raw_data"]), + type=click.Choice( + ["add_description", "add_raw_data", "custom_prompt", "custom_review"] + ), required=True, ) -def main(action, path): +@click.option( + "--model", + type=click.Choice(["gpt-4o", "gpt-4o-mini"]), + default="gpt-4o", +) +def main(action, path, model): """Recursively processes the specified DIRECTORY and updates files needing metadata injection.""" + global OPENAI_GPT_MODEL + OPENAI_GPT_MODEL = model + tests_to_process = [] # check if path is a file or directory @@ -321,6 +489,17 @@ def main(action, path): func = add_description_to_test elif action == "add_raw_data": func = add_raw_data_to_test + elif action == "custom_prompt": + if not USER_PROMPT: + user_prompt = input("Enter your prompt: ") + user_prompt = user_prompt.strip("\n").strip() + else: + user_prompt = USER_PROMPT + func = lambda path: custom_prompt(path, user_prompt) + elif action == "custom_review": + review_prompt = input("Enter your review prompt: ") + review_prompt = 
review_prompt.strip("\n").strip() + func = lambda path: custom_review(path, review_prompt) else: raise ValueError(f"Invalid action: {action}") diff --git a/validmind/tests/data_validation/ACFandPACFPlot.py b/validmind/tests/data_validation/ACFandPACFPlot.py index 971a5e910..804960efe 100644 --- a/validmind/tests/data_validation/ACFandPACFPlot.py +++ b/validmind/tests/data_validation/ACFandPACFPlot.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from statsmodels.tsa.stattools import acf, pacf -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -62,6 +62,8 @@ def ACFandPACFPlot(dataset: VMDataset): raise ValueError("Provided 'columns' must exist in the dataset") figures = [] + acf_store = {} + pacf_store = {} for col in df.columns: series = df[col] max_lags = min(40, len(series) // 2 - 1) @@ -77,6 +79,7 @@ def ACFandPACFPlot(dataset: VMDataset): font=dict(size=18), ) figures.append(acf_fig) + acf_store[col] = acf_values # Create PACF plot using Plotly pacf_values = pacf(series, nlags=max_lags) @@ -89,5 +92,6 @@ def ACFandPACFPlot(dataset: VMDataset): font=dict(size=18), ) figures.append(pacf_fig) + pacf_store[col] = pacf_values - return tuple(figures) + return (*figures, RawData(acf_values=acf_store, pacf_values=pacf_store)) diff --git a/validmind/tests/data_validation/AutoMA.py b/validmind/tests/data_validation/AutoMA.py index d89e104cd..a1cf2f34c 100644 --- a/validmind/tests/data_validation/AutoMA.py +++ b/validmind/tests/data_validation/AutoMA.py @@ -6,7 +6,7 @@ from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.stattools import adfuller -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -116,4 +116,4 @@ def AutoMA(dataset: VMDataset, max_ma_order: int = 3): return { "Auto MA Analysis Results": summary_ma_analysis, "Best MA Order Results": best_ma_order, - } + }, RawData(raw_series_data=df) diff --git a/validmind/tests/data_validation/BivariateScatterPlots.py b/validmind/tests/data_validation/BivariateScatterPlots.py index 6c259ab99..9c29a96fc 100644 --- a/validmind/tests/data_validation/BivariateScatterPlots.py +++ b/validmind/tests/data_validation/BivariateScatterPlots.py @@ -6,7 +6,7 @@ import plotly.express as px -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("tabular_data", "numerical_data", "visualization") @@ -79,4 +79,6 @@ def BivariateScatterPlots(dataset): figures.append(fig) - return tuple(figures) + return tuple(figures) + ( + RawData(selected_numerical_df=df, feature_pairs=features_pairs), + ) diff --git a/validmind/tests/data_validation/BoxPierce.py b/validmind/tests/data_validation/BoxPierce.py index 74407cc0e..cf9b8ac35 100644 --- a/validmind/tests/data_validation/BoxPierce.py +++ b/validmind/tests/data_validation/BoxPierce.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.diagnostic import acorr_ljungbox -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("regression") @@ -68,4 +68,4 @@ def BoxPierce(dataset): box_pierce_df.reset_index(inplace=True) box_pierce_df.columns = ["column", "stat", "pvalue"] - return box_pierce_df + return box_pierce_df, RawData(box_pierce_values=box_pierce_values) diff --git a/validmind/tests/data_validation/ClassImbalance.py b/validmind/tests/data_validation/ClassImbalance.py index 97c48ff79..24968231d 100644 --- 
a/validmind/tests/data_validation/ClassImbalance.py +++ b/validmind/tests/data_validation/ClassImbalance.py @@ -9,7 +9,7 @@ import plotly.graph_objs as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -104,4 +104,5 @@ def ClassImbalance( }, go.Figure(data=[trace], layout=layout), all(row["Pass/Fail"] == "Pass" for row in imbalanced_classes), + RawData(imbalance_percentages=imbalance_percentages), ) diff --git a/validmind/tests/data_validation/DatasetDescription.py b/validmind/tests/data_validation/DatasetDescription.py index fc5e7cb41..cec442297 100644 --- a/validmind/tests/data_validation/DatasetDescription.py +++ b/validmind/tests/data_validation/DatasetDescription.py @@ -9,7 +9,7 @@ from ydata_profiling.config import Settings from ydata_profiling.model.typeset import ProfilingTypeSet -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import UnsupportedColumnTypeError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -220,6 +220,15 @@ def DatasetDescription(dataset: VMDataset): for column in infer_datatypes(df): results.append(describe_column(df, column)) + raw_data = { + column["id"]: { + "type": column["type"], + "statistics": column["statistics"], + "histograms": column["histograms"], + } + for column in results + } + return { "Dataset Description": [ { @@ -233,4 +242,4 @@ def DatasetDescription(dataset: VMDataset): } for column in results ] - } + }, RawData(raw_data=raw_data) diff --git a/validmind/tests/data_validation/DatasetSplit.py b/validmind/tests/data_validation/DatasetSplit.py index 2f7dac7cf..7910ce046 100644 --- a/validmind/tests/data_validation/DatasetSplit.py +++ b/validmind/tests/data_validation/DatasetSplit.py @@ -5,7 +5,7 @@ from typing import List from validmind import tags, tasks -from validmind.vm_models import VMDataset +from validmind.vm_models import RawData, VMDataset DATASET_LABELS = { "train_ds": "Training", @@ -98,4 +98,4 @@ def DatasetSplit(datasets: List[VMDataset]): } ) - return table + return table, RawData(dataset_results=results) diff --git a/validmind/tests/data_validation/DickeyFullerGLS.py b/validmind/tests/data_validation/DickeyFullerGLS.py index 6142a73a4..d53a9aa76 100644 --- a/validmind/tests/data_validation/DickeyFullerGLS.py +++ b/validmind/tests/data_validation/DickeyFullerGLS.py @@ -6,7 +6,7 @@ from arch.unitroot import DFGLS from numpy.linalg import LinAlgError -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -97,4 +97,4 @@ def DickeyFullerGLS(dataset: VMDataset): return { "DFGLS Test Results": dfgls_values, - } + }, RawData(df=df) diff --git a/validmind/tests/data_validation/FeatureTargetCorrelationPlot.py b/validmind/tests/data_validation/FeatureTargetCorrelationPlot.py index f5880a102..43e2d7dd7 100644 --- a/validmind/tests/data_validation/FeatureTargetCorrelationPlot.py +++ b/validmind/tests/data_validation/FeatureTargetCorrelationPlot.py @@ -6,7 +6,7 @@ import numpy as np import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("tabular_data", "visualization", "correlation") @@ -58,7 +58,13 @@ def FeatureTargetCorrelationPlot(dataset, fig_height=600): fig = _visualize_feature_target_correlation(df, 
dataset.target_column, fig_height) - return fig + correlations = ( + df.corr(numeric_only=True)[dataset.target_column] + .drop(dataset.target_column) + .to_frame() + ) + + return fig, RawData(correlation_data=correlations) def _visualize_feature_target_correlation(df, target_column, fig_height): diff --git a/validmind/tests/data_validation/HighCardinality.py b/validmind/tests/data_validation/HighCardinality.py index 205ee4697..445fe1be8 100644 --- a/validmind/tests/data_validation/HighCardinality.py +++ b/validmind/tests/data_validation/HighCardinality.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -59,6 +59,8 @@ def HighCardinality( table = [] all_passed = True + raw_data = {} + for col in dataset.feature_columns_categorical: n_distinct = df[col].nunique() p_distinct = n_distinct / df.shape[0] @@ -73,7 +75,12 @@ def HighCardinality( } ) + raw_data[col] = { + "n_distinct": n_distinct, + "p_distinct": p_distinct, + } + if not passed: all_passed = False - return table, all_passed + return table, all_passed, RawData(raw_cardinality_details=raw_data) diff --git a/validmind/tests/data_validation/HighPearsonCorrelation.py b/validmind/tests/data_validation/HighPearsonCorrelation.py index 5be563185..700c894e3 100644 --- a/validmind/tests/data_validation/HighPearsonCorrelation.py +++ b/validmind/tests/data_validation/HighPearsonCorrelation.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -71,4 +71,8 @@ def HighPearsonCorrelation( pairs.sort(key=lambda x: abs(x["Coefficient"]), reverse=True) pairs = pairs[:top_n_correlations] - return pairs, all(p["Pass/Fail"] == "Pass" for p in pairs) + return ( + pairs, + all(p["Pass/Fail"] == "Pass" for p in pairs), + RawData(correlation_matrix=corr), + ) diff --git a/validmind/tests/data_validation/IQROutliersBarPlot.py b/validmind/tests/data_validation/IQROutliersBarPlot.py index 395fdb225..0efe50334 100644 --- a/validmind/tests/data_validation/IQROutliersBarPlot.py +++ b/validmind/tests/data_validation/IQROutliersBarPlot.py @@ -4,7 +4,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -118,4 +118,11 @@ def IQROutliersBarPlot( ) figures.append(fig) - return tuple(figures) + return ( + *figures, + RawData( + outlier_counts_by_feature=df[dataset.feature_columns_numeric].apply( + lambda col: compute_outliers(col, threshold) + ) + ), + ) diff --git a/validmind/tests/data_validation/IsolationForestOutliers.py b/validmind/tests/data_validation/IsolationForestOutliers.py index 6b6b191fc..1fb95f595 100644 --- a/validmind/tests/data_validation/IsolationForestOutliers.py +++ b/validmind/tests/data_validation/IsolationForestOutliers.py @@ -8,7 +8,7 @@ import seaborn as sns from sklearn.ensemble import IsolationForest -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -93,4 +93,4 @@ def IsolationForestOutliers( plt.close() - return tuple(figures) + return tuple(figures + [RawData(predictions=y_pred)]) diff --git 
a/validmind/tests/data_validation/JarqueBera.py b/validmind/tests/data_validation/JarqueBera.py index 927474a5b..e7f12c902 100644 --- a/validmind/tests/data_validation/JarqueBera.py +++ b/validmind/tests/data_validation/JarqueBera.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.stattools import jarque_bera -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("classification", "regression") @@ -67,4 +67,4 @@ def JarqueBera(dataset): jb_df.reset_index(inplace=True) jb_df.columns = ["column", "stat", "pvalue", "skew", "kurtosis"] - return jb_df + return jb_df, RawData(df_features=df) diff --git a/validmind/tests/data_validation/KPSS.py b/validmind/tests/data_validation/KPSS.py index 9d3d4985e..75ac336f4 100644 --- a/validmind/tests/data_validation/KPSS.py +++ b/validmind/tests/data_validation/KPSS.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.tsa.stattools import kpss -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -80,6 +80,8 @@ def KPSS(dataset: VMDataset): if not kpss_table: raise SkipTestError(f"No KPSS results found for dataset: {dataset.input_id}") + kpss_df = pd.DataFrame(kpss_table) + return { "KPSS Test Results": kpss_table, - } + }, RawData(kpss_results=kpss_df) diff --git a/validmind/tests/data_validation/LJungBox.py b/validmind/tests/data_validation/LJungBox.py index f746379bb..d89afbf3b 100644 --- a/validmind/tests/data_validation/LJungBox.py +++ b/validmind/tests/data_validation/LJungBox.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.diagnostic import acorr_ljungbox -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("regression") @@ -52,15 +52,17 @@ def LJungBox(dataset): df = dataset.df ljung_box_values = {} + raw_data = {} for col in df.columns: lb_results = acorr_ljungbox(df[col].values, return_df=True) ljung_box_values[col] = { "stat": lb_results.iloc[0]["lb_stat"], "pvalue": lb_results.iloc[0]["lb_pvalue"], } + raw_data[col] = lb_results ljung_box_df = pd.DataFrame.from_dict(ljung_box_values, orient="index") ljung_box_df.reset_index(inplace=True) ljung_box_df.columns = ["column", "stat", "pvalue"] - return ljung_box_df + return ljung_box_df, RawData(ljung_box_raw=raw_data) diff --git a/validmind/tests/data_validation/LaggedCorrelationHeatmap.py b/validmind/tests/data_validation/LaggedCorrelationHeatmap.py index a1ae2ac8b..61c4d561c 100644 --- a/validmind/tests/data_validation/LaggedCorrelationHeatmap.py +++ b/validmind/tests/data_validation/LaggedCorrelationHeatmap.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.figure_factory as ff -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset # Define the 'coolwarm' color scale manually @@ -101,4 +101,4 @@ def LaggedCorrelationHeatmap(dataset: VMDataset, num_lags: int = 10): xaxis_title="Lags", ) - return fig + return fig, RawData(correlation_matrix=correlation_df) diff --git a/validmind/tests/data_validation/MissingValuesBarPlot.py b/validmind/tests/data_validation/MissingValuesBarPlot.py index b98d8b315..9267f1a16 100644 --- a/validmind/tests/data_validation/MissingValuesBarPlot.py +++ b/validmind/tests/data_validation/MissingValuesBarPlot.py @@ -4,7 +4,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models 
import VMDataset @@ -106,13 +106,16 @@ def MissingValuesBarPlot( line=dict(color="red", dash="dash"), ) - return go.Figure( - data=[trace_below_threshold, trace_above_threshold, threshold_line], - layout=go.Layout( - title="Missing Values", - yaxis=dict(title="Columns"), - xaxis=dict(title="Missing Value Percentage (%)", range=[0, 100]), - barmode="stack", - height=fig_height, + return ( + go.Figure( + data=[trace_below_threshold, trace_above_threshold, threshold_line], + layout=go.Layout( + title="Missing Values", + yaxis=dict(title="Columns"), + xaxis=dict(title="Missing Value Percentage (%)", range=[0, 100]), + barmode="stack", + height=fig_height, + ), ), + RawData(missing_percentages=missing_percentages_sorted), ) diff --git a/validmind/tests/data_validation/MutualInformation.py b/validmind/tests/data_validation/MutualInformation.py index 0f6cfbeb7..329c7a400 100644 --- a/validmind/tests/data_validation/MutualInformation.py +++ b/validmind/tests/data_validation/MutualInformation.py @@ -79,9 +79,8 @@ def MutualInformation( # Create DataFrame for raw data raw_data = RawData( - feature=dataset.feature_columns, - mutual_information_score=mi_scores.tolist(), - pass_fail=["Pass" if score >= min_threshold else "Fail" for score in mi_scores], + features=dataset.feature_columns, + mutual_information_scores=mi_scores.tolist(), ) # Create Plotly figure @@ -127,4 +126,4 @@ def MutualInformation( template="plotly_white", ) - return raw_data, fig + return fig, raw_data diff --git a/validmind/tests/data_validation/PearsonCorrelationMatrix.py b/validmind/tests/data_validation/PearsonCorrelationMatrix.py index 8f581eeb0..9cfb6a284 100644 --- a/validmind/tests/data_validation/PearsonCorrelationMatrix.py +++ b/validmind/tests/data_validation/PearsonCorrelationMatrix.py @@ -5,7 +5,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("tabular_data", "numerical_data", "correlation") @@ -88,4 +88,4 @@ def PearsonCorrelationMatrix(dataset): fig = go.Figure(data=[heatmap], layout=layout) - return fig + return fig, RawData(correlation_matrix=corr_matrix) diff --git a/validmind/tests/data_validation/ProtectedClassesCombination.py b/validmind/tests/data_validation/ProtectedClassesCombination.py index ec3b7e3a0..bbd48c8c8 100644 --- a/validmind/tests/data_validation/ProtectedClassesCombination.py +++ b/validmind/tests/data_validation/ProtectedClassesCombination.py @@ -8,7 +8,7 @@ import plotly.graph_objects as go import plotly.subplots as sp -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from validmind.logging import get_logger @@ -202,4 +202,9 @@ def ProtectedClassesCombination(dataset, model, protected_classes=None): {"Class Combination Table": metrics_by_group}, {"DPR and EOR table": dpr_eor_df}, fig, + RawData( + metrics_frame=mf, + demographic_parity_ratios=m_dpr, + equalized_odds_ratios=m_eqo, + ), ) diff --git a/validmind/tests/data_validation/ProtectedClassesDescription.py b/validmind/tests/data_validation/ProtectedClassesDescription.py index aed695847..038770a93 100644 --- a/validmind/tests/data_validation/ProtectedClassesDescription.py +++ b/validmind/tests/data_validation/ProtectedClassesDescription.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger logger = get_logger(__name__) @@ -127,4 +127,4 @@ def 
ProtectedClassesDescription(dataset, protected_classes=None): ["Protected Class", "Count"], ascending=[True, False] ) - return (stats_df, *tuple(figures)) + return (stats_df, *tuple(figures), RawData(grouped_counts=counts)) diff --git a/validmind/tests/data_validation/ProtectedClassesDisparity.py b/validmind/tests/data_validation/ProtectedClassesDisparity.py index fb8d00fe3..61a7d9b1e 100644 --- a/validmind/tests/data_validation/ProtectedClassesDisparity.py +++ b/validmind/tests/data_validation/ProtectedClassesDisparity.py @@ -119,7 +119,7 @@ def ProtectedClassesDisparity( mask_significance=True, ) - plots = [] + figures = [] for protected_class in protected_classes: plot = ap.disparity( bdf, metrics, protected_class, fairness_threshold=disparity_tolerance @@ -129,13 +129,12 @@ def ProtectedClassesDisparity( plot.save( buf, format="png" ) # as long as the above library is installed, this will work - plots.append(buf.getvalue()) + figures.append(buf.getvalue()) string = "_disparity" metrics_adj = [x + string for x in metrics] table = bdf[["attribute_name", "attribute_value"] + b.list_disparities(bdf)] - plots.append(aqp.plot_disparity_all(bdf, metrics=metrics_adj)) - plots_return = tuple(plots) + figures.append(aqp.plot_disparity_all(bdf, metrics=metrics_adj)) - return (table, *plots_return) + return (table, *figures) diff --git a/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py b/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py index aa0f8d909..924676da8 100644 --- a/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py +++ b/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from validmind.logging import get_logger @@ -106,6 +106,7 @@ def ProtectedClassesThresholdOptimizer( return ( {"DPR and EOR Table": fairness_metrics.reset_index()}, fig, + RawData(threshold_optimizer=threshold_optimizer, y_pred_opt=y_pred_opt), ) diff --git a/validmind/tests/data_validation/RollingStatsPlot.py b/validmind/tests/data_validation/RollingStatsPlot.py index 77834f85d..2d2e2d8f8 100644 --- a/validmind/tests/data_validation/RollingStatsPlot.py +++ b/validmind/tests/data_validation/RollingStatsPlot.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -95,13 +95,24 @@ def RollingStatsPlot(dataset: VMDataset, window_size: int = 12): if not pd.api.types.is_datetime64_any_dtype(dataset.df.index): raise SkipTestError("Index must be a datetime type") - return tuple( - [ - plot_rolling_statistics( - df=dataset.df.dropna(), - col=col, - window_size=window_size, - ) - for col in dataset.feature_columns - ] + figures = [ + plot_rolling_statistics( + df=dataset.df.dropna(), + col=col, + window_size=window_size, + ) + for col in dataset.feature_columns + ] + + return ( + *figures, + RawData( + rolling_means_stds={ + col: { + "rolling_mean": dataset.df[col].rolling(window=window_size).mean(), + "rolling_std": dataset.df[col].rolling(window=window_size).std(), + } + for col in dataset.feature_columns + } + ), ) diff --git a/validmind/tests/data_validation/RunsTest.py b/validmind/tests/data_validation/RunsTest.py index 7004b238d..8eaab7d31 100644 --- 
a/validmind/tests/data_validation/RunsTest.py +++ b/validmind/tests/data_validation/RunsTest.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.sandbox.stats.runs import runstest_1samp -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("classification", "regression") @@ -69,4 +69,4 @@ def RunsTest(dataset): runs_test_df.reset_index(inplace=True) runs_test_df.columns = ["feature", "stat", "pvalue"] - return runs_test_df + return runs_test_df, RawData(runs_test_values=runs_test_values) diff --git a/validmind/tests/data_validation/ScatterPlot.py b/validmind/tests/data_validation/ScatterPlot.py index f0474aa28..748592592 100644 --- a/validmind/tests/data_validation/ScatterPlot.py +++ b/validmind/tests/data_validation/ScatterPlot.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import seaborn as sns -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("tabular_data", "visualization") @@ -55,8 +55,8 @@ def ScatterPlot(dataset): - Assumes that the dataset can fit into the computer's memory, which might not be valid for extremely large datasets. """ - g = sns.pairplot(data=dataset.df, diag_kind="kde") + for ax in g.axes.flatten(): # rotate x axis labels ax.set_xlabel(ax.get_xlabel(), rotation=45) @@ -64,12 +64,10 @@ def ScatterPlot(dataset): ax.set_ylabel(ax.get_ylabel(), rotation=45) # set y labels alignment ax.yaxis.get_label().set_horizontalalignment("right") + # Get the current figure fig = plt.gcf() - figures = [] - figures.append(fig) - plt.close("all") - return tuple(figures) + return fig, RawData(dataset_dataframe=dataset.df) diff --git a/validmind/tests/data_validation/SeasonalDecompose.py b/validmind/tests/data_validation/SeasonalDecompose.py index 4ba067ff2..a764dbfba 100644 --- a/validmind/tests/data_validation/SeasonalDecompose.py +++ b/validmind/tests/data_validation/SeasonalDecompose.py @@ -9,7 +9,7 @@ from scipy import stats from statsmodels.tsa.seasonal import seasonal_decompose -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -65,6 +65,8 @@ def SeasonalDecompose(dataset: VMDataset, seasonal_model: str = "additive"): figures = [] + raw_data = {} + for col in df.columns: series = df[col].dropna() @@ -153,7 +155,15 @@ def SeasonalDecompose(dataset: VMDataset, seasonal_model: str = "additive"): figures.append(fig) + # Add the decomposed components to raw_data + raw_data[col] = { + "observed": sd.observed, + "trend": sd.trend, + "seasonal": sd.seasonal, + "residuals": sd.resid, + } + if not figures: raise SkipTestError("No valid features found for seasonal decomposition") - return tuple(figures) + return (*figures, RawData(decomposed_components=raw_data)) diff --git a/validmind/tests/data_validation/ShapiroWilk.py b/validmind/tests/data_validation/ShapiroWilk.py index b41d0cae3..a855ddf96 100644 --- a/validmind/tests/data_validation/ShapiroWilk.py +++ b/validmind/tests/data_validation/ShapiroWilk.py @@ -5,7 +5,7 @@ import pandas as pd from scipy import stats -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("classification", "regression") @@ -66,4 +66,4 @@ def ShapiroWilk(dataset): sw_df.reset_index(inplace=True) sw_df.columns = ["column", "stat", "pvalue"] - return sw_df + return sw_df, RawData(statistics=sw_values) diff --git a/validmind/tests/data_validation/Skewness.py b/validmind/tests/data_validation/Skewness.py 
index 38347385f..df8f6e8fe 100644 --- a/validmind/tests/data_validation/Skewness.py +++ b/validmind/tests/data_validation/Skewness.py @@ -5,7 +5,7 @@ from ydata_profiling.config import Settings from ydata_profiling.model.typeset import ProfilingTypeSet -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("data_quality", "tabular_data") @@ -73,6 +73,10 @@ def Skewness(dataset, max_threshold=1): } ) - return { - "Skewness Results for Dataset": results_table, - }, passed + return ( + { + "Skewness Results for Dataset": results_table, + }, + passed, + RawData(skewness_values=skewness), + ) diff --git a/validmind/tests/data_validation/SpreadPlot.py b/validmind/tests/data_validation/SpreadPlot.py index a3d1fd739..0113a0ac0 100644 --- a/validmind/tests/data_validation/SpreadPlot.py +++ b/validmind/tests/data_validation/SpreadPlot.py @@ -6,7 +6,7 @@ import pandas as pd import seaborn as sns -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -70,6 +70,7 @@ def SpreadPlot(dataset: VMDataset): ] figures = [] + spread_data = {} for var1, var2 in feature_pairs: fig, ax = plt.subplots() @@ -80,8 +81,9 @@ def SpreadPlot(dataset: VMDataset): y=0.95, ) + spread_series = df[var1] - df[var2] sns.lineplot( - data=df[var1] - df[var2], + data=spread_series, ax=ax, ) @@ -89,5 +91,8 @@ def SpreadPlot(dataset: VMDataset): ax.tick_params(axis="both", labelsize=18) figures.append(fig) + spread_data[f"{var1}_{var2}_spread"] = spread_series.to_frame( + name=f"spread_{var1}_{var2}" + ) - return tuple(figures) + return tuple(figures) + (RawData(spread_data=spread_data),) diff --git a/validmind/tests/data_validation/TabularCategoricalBarPlots.py b/validmind/tests/data_validation/TabularCategoricalBarPlots.py index e12e08394..ca3c16930 100644 --- a/validmind/tests/data_validation/TabularCategoricalBarPlots.py +++ b/validmind/tests/data_validation/TabularCategoricalBarPlots.py @@ -4,7 +4,7 @@ import plotly.graph_objs as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -66,9 +66,11 @@ def TabularCategoricalBarPlots(dataset: VMDataset): ] figures = [] + counts_dict = {} for col in dataset.feature_columns_categorical: counts = dataset.df[col].value_counts() + counts_dict[col] = counts fig = go.Figure() fig.add_trace( @@ -90,4 +92,4 @@ def TabularCategoricalBarPlots(dataset: VMDataset): ) figures.append(fig) - return tuple(figures) + return (*figures, RawData(category_counts=counts_dict)) diff --git a/validmind/tests/data_validation/TabularDateTimeHistograms.py b/validmind/tests/data_validation/TabularDateTimeHistograms.py index 04ffccba2..bf56f0d82 100644 --- a/validmind/tests/data_validation/TabularDateTimeHistograms.py +++ b/validmind/tests/data_validation/TabularDateTimeHistograms.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -72,4 +72,4 @@ def TabularDateTimeHistograms(dataset: VMDataset): font=dict(size=18), ) - return fig + return fig, RawData(date_differences=date_diffs) diff --git a/validmind/tests/data_validation/TabularNumericalHistograms.py b/validmind/tests/data_validation/TabularNumericalHistograms.py index 438cdfd89..d9c4c9148 100644 --- 
a/validmind/tests/data_validation/TabularNumericalHistograms.py +++ b/validmind/tests/data_validation/TabularNumericalHistograms.py @@ -4,7 +4,7 @@ import plotly.graph_objs as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset diff --git a/validmind/tests/data_validation/TargetRateBarPlots.py b/validmind/tests/data_validation/TargetRateBarPlots.py index 458006549..f6b8b3eee 100644 --- a/validmind/tests/data_validation/TargetRateBarPlots.py +++ b/validmind/tests/data_validation/TargetRateBarPlots.py @@ -6,7 +6,7 @@ import plotly.graph_objs as go from plotly.subplots import make_subplots -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -62,12 +62,13 @@ def TargetRateBarPlots(dataset: VMDataset): df = dataset.df figures = [] + raw_data = [] for col in dataset.feature_columns_categorical: - # Calculate counts and default rate for each category counts = df[col].value_counts() default_rate = df.groupby(col)[dataset.target_column].mean() + raw_data.append({"column": col, "counts": counts, "default_rate": default_rate}) fig = make_subplots( rows=1, @@ -107,4 +108,4 @@ def TargetRateBarPlots(dataset: VMDataset): figures.append(fig) - return tuple(figures) + return (*figures, RawData(target_rates_by_category=raw_data)) diff --git a/validmind/tests/data_validation/TimeSeriesFrequency.py b/validmind/tests/data_validation/TimeSeriesFrequency.py index 6ae02ae4c..aa34d7d75 100644 --- a/validmind/tests/data_validation/TimeSeriesFrequency.py +++ b/validmind/tests/data_validation/TimeSeriesFrequency.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -103,4 +103,9 @@ def TimeSeriesFrequency(dataset: VMDataset): ), ) - return frequencies, fig, len(set(item["Frequency"] for item in frequencies)) == 1 + return ( + frequencies, + fig, + len(set(item["Frequency"] for item in frequencies)) == 1, + RawData(time_diff_days=time_diff_days), + ) diff --git a/validmind/tests/data_validation/TimeSeriesHistogram.py b/validmind/tests/data_validation/TimeSeriesHistogram.py index fd38e1eee..ac4de3011 100644 --- a/validmind/tests/data_validation/TimeSeriesHistogram.py +++ b/validmind/tests/data_validation/TimeSeriesHistogram.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.express as px -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger logger = get_logger(__name__) @@ -64,6 +64,7 @@ def TimeSeriesHistogram(dataset, nbins=30): raise ValueError("Provided 'columns' must exist in the dataset") figures = [] + data_without_na = {} for col in columns: # Check for missing values and log if any are found missing_count = df[col].isna().sum() @@ -74,6 +75,7 @@ def TimeSeriesHistogram(dataset, nbins=30): # Drop missing values for the current column valid_data = df[~df[col].isna()] + data_without_na[col] = valid_data fig = px.histogram( valid_data, @@ -96,4 +98,4 @@ def TimeSeriesHistogram(dataset, nbins=30): ) figures.append(fig) - return tuple(figures) + return (*figures, RawData(data_without_na=data_without_na)) diff --git a/validmind/tests/data_validation/TimeSeriesLinePlot.py b/validmind/tests/data_validation/TimeSeriesLinePlot.py index 4df6f1472..3f99af57c 100644 --- 
a/validmind/tests/data_validation/TimeSeriesLinePlot.py +++ b/validmind/tests/data_validation/TimeSeriesLinePlot.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -78,4 +78,4 @@ def TimeSeriesLinePlot(dataset: VMDataset): figures.append(fig) - return tuple(figures) + return (*figures, RawData(time_series_data=df[dataset.feature_columns_numeric])) diff --git a/validmind/tests/data_validation/TimeSeriesMissingValues.py b/validmind/tests/data_validation/TimeSeriesMissingValues.py index 4abe70a18..be7199387 100644 --- a/validmind/tests/data_validation/TimeSeriesMissingValues.py +++ b/validmind/tests/data_validation/TimeSeriesMissingValues.py @@ -6,7 +6,7 @@ import plotly.express as px import plotly.figure_factory as ff -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -62,15 +62,18 @@ def TimeSeriesMissingValues(dataset: VMDataset, min_threshold: int = 1): if sum(missing.values) == 0: # if theres no missing values, no need to plot anything - return [ - { - "Column": col, - "Number of Missing Values": missing[col], - "Percentage of Missing Values (%)": 0, - "Pass/Fail": "Pass", - } - for col in missing.index - ], True + return ( + [ + { + "Column": col, + "Number of Missing Values": missing[col], + "Percentage of Missing Values (%)": 0, + "Pass/Fail": "Pass", + } + for col in missing.index + ], + True, + ) barplot = px.bar( missing, @@ -110,4 +113,5 @@ def TimeSeriesMissingValues(dataset: VMDataset, min_threshold: int = 1): barplot, heatmap, all(missing[col] < min_threshold for col in missing.index), + RawData(missing_values_count=missing, missing_values_mask=missing_mask), ) diff --git a/validmind/tests/data_validation/TimeSeriesOutliers.py b/validmind/tests/data_validation/TimeSeriesOutliers.py index 1a3fca83e..88a2fa2a6 100644 --- a/validmind/tests/data_validation/TimeSeriesOutliers.py +++ b/validmind/tests/data_validation/TimeSeriesOutliers.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -115,4 +115,5 @@ def TimeSeriesOutliers(dataset: VMDataset, zscore_threshold: int = 3): outlier_df.sort_values(["Column", "Date"]), figures, len(outlier_df) == 0, + RawData(z_scores=z_scores), ) diff --git a/validmind/tests/data_validation/WOEBinPlots.py b/validmind/tests/data_validation/WOEBinPlots.py index 852a20d72..3ac139338 100644 --- a/validmind/tests/data_validation/WOEBinPlots.py +++ b/validmind/tests/data_validation/WOEBinPlots.py @@ -9,7 +9,7 @@ import scorecardpy as sc from plotly.subplots import make_subplots -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -140,4 +140,4 @@ def WOEBinPlots( figures.append(fig) - return tuple(figures) + return (*figures, RawData(woe_iv_data=woe_iv_df)) diff --git a/validmind/tests/data_validation/WOEBinTable.py b/validmind/tests/data_validation/WOEBinTable.py index 42662d315..3c8e82172 100644 --- a/validmind/tests/data_validation/WOEBinTable.py +++ b/validmind/tests/data_validation/WOEBinTable.py @@ -5,7 +5,7 @@ 
import pandas as pd import scorecardpy as sc -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -61,12 +61,14 @@ def WOEBinTable(dataset: VMDataset, breaks_adj: list = None): except Exception as e: raise SkipTestError(f"Error during binning: {e}") + result_table = ( + pd.concat(bins.values(), keys=bins.keys()) + .reset_index() + .drop(columns=["variable"]) + .rename(columns={"level_0": "variable"}) + .assign(bin_number=lambda x: x.groupby("variable").cumcount()) + ) + return { - "Weight of Evidence (WoE) and Information Value (IV)": ( - pd.concat(bins.values(), keys=bins.keys()) - .reset_index() - .drop(columns=["variable"]) - .rename(columns={"level_0": "variable"}) - .assign(bin_number=lambda x: x.groupby("variable").cumcount()) - ) - } + "Weight of Evidence (WoE) and Information Value (IV)": result_table + }, RawData(bins=bins) diff --git a/validmind/tests/data_validation/ZivotAndrewsArch.py b/validmind/tests/data_validation/ZivotAndrewsArch.py index ec2b560c6..35f30aee6 100644 --- a/validmind/tests/data_validation/ZivotAndrewsArch.py +++ b/validmind/tests/data_validation/ZivotAndrewsArch.py @@ -6,7 +6,7 @@ from arch.unitroot import ZivotAndrews from numpy.linalg import LinAlgError -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -65,6 +65,7 @@ def ZivotAndrewsArch(dataset: VMDataset): df = df.apply(pd.to_numeric, errors="coerce") za_values = [] + raw_data = {} for col in df.columns: try: @@ -83,4 +84,14 @@ def ZivotAndrewsArch(dataset: VMDataset): } ) - return {"Zivot-Andrews Test Results": za_values} + # Store intermediate raw data for each column + raw_data[col] = { + "stat": za.stat, + "pvalue": za.pvalue, + "usedlag": za.lags, + "nobs": za.nobs, + } + + return {"Zivot-Andrews Test Results": za_values}, RawData( + zivot_andrews_results=raw_data + ) diff --git a/validmind/tests/data_validation/nlp/CommonWords.py b/validmind/tests/data_validation/nlp/CommonWords.py index 4009552aa..617ce7c3a 100644 --- a/validmind/tests/data_validation/nlp/CommonWords.py +++ b/validmind/tests/data_validation/nlp/CommonWords.py @@ -8,7 +8,7 @@ import plotly.graph_objects as go from nltk.corpus import stopwords -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -94,4 +94,4 @@ def create_corpus(df, text_column): xaxis_tickangle=-45, ) - return fig + return fig, RawData(word_frequencies=dict(most)) diff --git a/validmind/tests/data_validation/nlp/Hashtags.py b/validmind/tests/data_validation/nlp/Hashtags.py index 487318a32..4d292ee12 100644 --- a/validmind/tests/data_validation/nlp/Hashtags.py +++ b/validmind/tests/data_validation/nlp/Hashtags.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -76,4 +76,4 @@ def Hashtags(dataset: VMDataset, top_hashtags: int = 25): xaxis_tickangle=-45, ) - return fig + return fig, RawData(top_hashtag_counts=top_hashtag_counts) diff --git a/validmind/tests/data_validation/nlp/LanguageDetection.py b/validmind/tests/data_validation/nlp/LanguageDetection.py index 9db5d0840..a4300288f 100644 --- a/validmind/tests/data_validation/nlp/LanguageDetection.py +++ 
b/validmind/tests/data_validation/nlp/LanguageDetection.py @@ -5,7 +5,7 @@ import plotly.express as px from langdetect import LangDetectException, detect -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "visualization") @@ -64,9 +64,12 @@ def detect_language(text): languages = dataset.df[dataset.text_column].apply(detect_language) - return px.histogram( - languages, - x=languages, - title="Language Distribution", - labels={"x": "Language Codes"}, + return ( + px.histogram( + languages, + x=languages, + title="Language Distribution", + labels={"x": "Language Codes"}, + ), + RawData(detected_languages=languages), ) diff --git a/validmind/tests/data_validation/nlp/Mentions.py b/validmind/tests/data_validation/nlp/Mentions.py index bfda8a036..8eb63855f 100644 --- a/validmind/tests/data_validation/nlp/Mentions.py +++ b/validmind/tests/data_validation/nlp/Mentions.py @@ -7,7 +7,7 @@ import pandas as pd import plotly.express as px -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -75,9 +75,12 @@ def Mentions(dataset: VMDataset, top_mentions: int = 25): } ) - return px.treemap( - mention_frequencies_df, - path=["Scenario"], - values="Percentage", - title="Tree of Mentions", + return ( + px.treemap( + mention_frequencies_df, + path=["Scenario"], + values="Percentage", + title="Tree of Mentions", + ), + RawData(mention_counts=mention_counts), ) diff --git a/validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py b/validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py index 3e431ff26..596f2c386 100644 --- a/validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py +++ b/validmind/tests/data_validation/nlp/PolarityAndSubjectivity.py @@ -7,7 +7,7 @@ import plotly.express as px from textblob import TextBlob -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "data_validation") @@ -144,4 +144,4 @@ def PolarityAndSubjectivity(dataset, threshold_subjectivity=0.5, threshold_polar statistics_tables = {"Quadrant Distribution": quadrant_df, "Statistics": stats_df} - return fig, statistics_tables + return fig, statistics_tables, RawData(sentiment_data=data) diff --git a/validmind/tests/data_validation/nlp/Punctuations.py b/validmind/tests/data_validation/nlp/Punctuations.py index 1d8a6f65c..ea2d5fed6 100644 --- a/validmind/tests/data_validation/nlp/Punctuations.py +++ b/validmind/tests/data_validation/nlp/Punctuations.py @@ -11,7 +11,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "visualization", "frequency_analysis") @@ -63,7 +63,8 @@ def Punctuations(dataset, count_mode="token"): corpus = _create_corpus(dataset.df, dataset.text_column) punctuation_counts = _count_punctuations(corpus, count_mode) - return _create_punctuation_plot(punctuation_counts) + fig = _create_punctuation_plot(punctuation_counts) + return fig, RawData(punctuation_counts=dict(punctuation_counts)) def _create_punctuation_plot(punctuation_counts): diff --git a/validmind/tests/data_validation/nlp/Sentiment.py b/validmind/tests/data_validation/nlp/Sentiment.py index 69f94d52c..6abb052e2 100644 --- a/validmind/tests/data_validation/nlp/Sentiment.py +++ b/validmind/tests/data_validation/nlp/Sentiment.py @@ -8,7 +8,7 @@ import seaborn as sns from nltk.sentiment import SentimentIntensityAnalyzer -from 
validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "data_validation") @@ -77,4 +77,4 @@ def get_vader_sentiment(text): plt.close("all") - return fig + return fig, RawData(sentiment_scores=vader_sentiment.tolist()) diff --git a/validmind/tests/data_validation/nlp/StopWords.py b/validmind/tests/data_validation/nlp/StopWords.py index d41a7a60f..2d9eb11ce 100644 --- a/validmind/tests/data_validation/nlp/StopWords.py +++ b/validmind/tests/data_validation/nlp/StopWords.py @@ -13,7 +13,7 @@ import plotly.graph_objects as go from nltk.corpus import stopwords -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -124,4 +124,5 @@ def create_corpus(df, text_column): }, fig, passed, + RawData(stop_word_frequencies=dic, total_words=total_words), ) diff --git a/validmind/tests/data_validation/nlp/TextDescription.py b/validmind/tests/data_validation/nlp/TextDescription.py index 64ec63f68..352aba282 100644 --- a/validmind/tests/data_validation/nlp/TextDescription.py +++ b/validmind/tests/data_validation/nlp/TextDescription.py @@ -9,7 +9,7 @@ import plotly.express as px from nltk.corpus import stopwords -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset @@ -173,4 +173,4 @@ def TextDescription( ) ) - return tuple(figures) + return (*figures, RawData(metrics_dataframe=metrics_df)) diff --git a/validmind/tests/data_validation/nlp/Toxicity.py b/validmind/tests/data_validation/nlp/Toxicity.py index 260637631..ebb8fa92a 100644 --- a/validmind/tests/data_validation/nlp/Toxicity.py +++ b/validmind/tests/data_validation/nlp/Toxicity.py @@ -6,7 +6,7 @@ import matplotlib.pyplot as plt import seaborn as sns -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "data_validation") @@ -73,4 +73,4 @@ def Toxicity(dataset): plt.close() - return fig + return fig, RawData(toxicity_scores=toxicity_scores) diff --git a/validmind/tests/model_validation/BertScore.py b/validmind/tests/model_validation/BertScore.py index 24a13c31c..bd8257b19 100644 --- a/validmind/tests/model_validation/BertScore.py +++ b/validmind/tests/model_validation/BertScore.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.utils import validate_prediction @@ -131,4 +131,4 @@ def BertScore( # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + return (result_df, *figures, RawData(bert_scores_df=metrics_df)) diff --git a/validmind/tests/model_validation/BleuScore.py b/validmind/tests/model_validation/BleuScore.py index ca1863577..9a560a506 100644 --- a/validmind/tests/model_validation/BleuScore.py +++ b/validmind/tests/model_validation/BleuScore.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.utils import validate_prediction @@ -114,4 +114,4 @@ def BleuScore(dataset, model): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + return (result_df, *figures, RawData(bleu_scores=metrics_df)) diff --git 
a/validmind/tests/model_validation/ClusterSizeDistribution.py b/validmind/tests/model_validation/ClusterSizeDistribution.py index 03e473bda..9dbb6b485 100644 --- a/validmind/tests/model_validation/ClusterSizeDistribution.py +++ b/validmind/tests/model_validation/ClusterSizeDistribution.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -72,4 +72,4 @@ def ClusterSizeDistribution(dataset: VMDataset, model: VMModel): fig.update_yaxes(title_text="Counts", showgrid=False) fig.update_layout(title_text="Cluster distribution", title_x=0.5, barmode="group") - return fig + return fig, RawData(cluster_counts=df_counts) diff --git a/validmind/tests/model_validation/ContextualRecall.py b/validmind/tests/model_validation/ContextualRecall.py index 24a1e2347..ad6df9a3e 100644 --- a/validmind/tests/model_validation/ContextualRecall.py +++ b/validmind/tests/model_validation/ContextualRecall.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.utils import validate_prediction @@ -118,4 +118,4 @@ def ContextualRecall(dataset, model): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + return (result_df, *tuple(figures), RawData(contextual_recall_scores=metrics_df)) diff --git a/validmind/tests/model_validation/FeaturesAUC.py b/validmind/tests/model_validation/FeaturesAUC.py index a785db108..cd4ba5afd 100644 --- a/validmind/tests/model_validation/FeaturesAUC.py +++ b/validmind/tests/model_validation/FeaturesAUC.py @@ -7,7 +7,7 @@ import plotly.graph_objects as go from sklearn.metrics import roc_auc_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -95,4 +95,4 @@ def FeaturesAUC(dataset: VMDataset, fontsize: int = 12, figure_height: int = 500 height=figure_height, ) - return fig + return fig, RawData(feature_aucs=aucs) diff --git a/validmind/tests/model_validation/MeteorScore.py b/validmind/tests/model_validation/MeteorScore.py index 7a86ff0e4..052297810 100644 --- a/validmind/tests/model_validation/MeteorScore.py +++ b/validmind/tests/model_validation/MeteorScore.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.utils import validate_prediction @@ -117,4 +117,4 @@ def MeteorScore(dataset, model): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + return (result_df, *tuple(figures), RawData(meteor_scores=metrics_df)) diff --git a/validmind/tests/model_validation/ModelPredictionResiduals.py b/validmind/tests/model_validation/ModelPredictionResiduals.py index 207256416..ceaa5fbf4 100644 --- a/validmind/tests/model_validation/ModelPredictionResiduals.py +++ b/validmind/tests/model_validation/ModelPredictionResiduals.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from scipy.stats import kstest -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("regression") @@ -102,4 +102,4 @@ def 
ModelPredictionResiduals( # Create a summary DataFrame for the KS normality test results summary_df = pd.DataFrame([summary]) - return (summary_df, *figures) + return (summary_df, *figures, RawData(residuals=residuals)) diff --git a/validmind/tests/model_validation/RegardScore.py b/validmind/tests/model_validation/RegardScore.py index 9eb64aee8..bcaa57d95 100644 --- a/validmind/tests/model_validation/RegardScore.py +++ b/validmind/tests/model_validation/RegardScore.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.utils import validate_prediction @@ -142,4 +142,8 @@ def calculate_stats(df, metric_name): ] ] - return (result_df, *tuple(figures)) + return ( + result_df, + *figures, + RawData(true_regard=true_df, pred_regard=pred_df), + ) diff --git a/validmind/tests/model_validation/RegressionResidualsPlot.py b/validmind/tests/model_validation/RegressionResidualsPlot.py index 3f47af657..723bbbe53 100644 --- a/validmind/tests/model_validation/RegressionResidualsPlot.py +++ b/validmind/tests/model_validation/RegressionResidualsPlot.py @@ -6,7 +6,7 @@ import plotly.figure_factory as ff import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -60,8 +60,9 @@ def RegressionResidualsPlot(model: VMModel, dataset: VMDataset, bin_size: float figures = [] # Residuals plot + residuals = y_true.flatten() - y_pred.flatten() fig = ff.create_distplot( - hist_data=[y_true.flatten() - y_pred.flatten()], + hist_data=[residuals], group_labels=["Residuals"], bin_size=[bin_size], show_hist=True, @@ -104,4 +105,4 @@ def RegressionResidualsPlot(model: VMModel, dataset: VMDataset, bin_size: float ) ) - return tuple(figures) + return (*figures, RawData(residuals=residuals, y_true=y_true, y_pred=y_pred)) diff --git a/validmind/tests/model_validation/RougeScore.py b/validmind/tests/model_validation/RougeScore.py index c9ab82977..eccc38e9d 100644 --- a/validmind/tests/model_validation/RougeScore.py +++ b/validmind/tests/model_validation/RougeScore.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from rouge import Rouge -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "visualization") @@ -121,4 +121,7 @@ def RougeScore(dataset, model, metric="rouge-1"): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + # Include raw data + raw_data = RawData(score_list=score_list) + + return (result_df, *figures, raw_data) diff --git a/validmind/tests/model_validation/TimeSeriesPredictionWithCI.py b/validmind/tests/model_validation/TimeSeriesPredictionWithCI.py index f6cbd9050..329e3b42b 100644 --- a/validmind/tests/model_validation/TimeSeriesPredictionWithCI.py +++ b/validmind/tests/model_validation/TimeSeriesPredictionWithCI.py @@ -7,7 +7,7 @@ import plotly.graph_objects as go from scipy.stats import norm -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("model_predictions", "visualization") @@ -144,4 +144,13 @@ def TimeSeriesPredictionWithCI(dataset, model, confidence=0.95): template="plotly_white", ) - return fig, breaches_df + return ( + fig, + breaches_df, + RawData( + errors=errors, + z_score=z_score, + lower_confidence=lower_conf, + upper_confidence=upper_conf, + ), + ) diff --git 
a/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py b/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py index 6d2c84134..118cff2d7 100644 --- a/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py +++ b/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py @@ -4,7 +4,7 @@ import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("model_predictions", "visualization") @@ -70,4 +70,6 @@ def TimeSeriesPredictionsPlot(dataset, model): template="plotly_white", ) - return fig + return fig, RawData( + actual_values=dataset.y, predicted_values=y_pred, time_index=time_index + ) diff --git a/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py b/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py index 79d644e5f..e13b4f572 100644 --- a/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py +++ b/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py @@ -7,7 +7,7 @@ import plotly.express as px from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("model_performance", "sklearn") @@ -105,4 +105,4 @@ def TimeSeriesR2SquareBySegments(dataset, model, segments=None): }, ) - return fig, results_df + return fig, results_df, RawData(segments=segments) diff --git a/validmind/tests/model_validation/TokenDisparity.py b/validmind/tests/model_validation/TokenDisparity.py index e38822b20..89e654716 100644 --- a/validmind/tests/model_validation/TokenDisparity.py +++ b/validmind/tests/model_validation/TokenDisparity.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "visualization") @@ -108,4 +108,4 @@ def TokenDisparity(dataset, model): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *tuple(figures)) + return (result_df, *figures, RawData(token_counts_df=df)) diff --git a/validmind/tests/model_validation/ToxicityScore.py b/validmind/tests/model_validation/ToxicityScore.py index 9f106154f..9ba8a9621 100644 --- a/validmind/tests/model_validation/ToxicityScore.py +++ b/validmind/tests/model_validation/ToxicityScore.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("nlp", "text_data", "visualization") @@ -139,4 +139,12 @@ def calculate_stats(df): ] ] - return (result_df, *tuple(figures)) + return ( + result_df, + *tuple(figures), + RawData( + input_toxicity=input_toxicity, + true_toxicity=true_toxicity, + pred_toxicity=pred_toxicity, + ), + ) diff --git a/validmind/tests/model_validation/embeddings/ClusterDistribution.py b/validmind/tests/model_validation/embeddings/ClusterDistribution.py index d5244f0a6..33140b39f 100644 --- a/validmind/tests/model_validation/embeddings/ClusterDistribution.py +++ b/validmind/tests/model_validation/embeddings/ClusterDistribution.py @@ -5,7 +5,7 @@ import plotly.express as px from sklearn.cluster import KMeans -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,8 +52,14 @@ def ClusterDistribution(model: VMModel, dataset: VMDataset, num_clusters: int = - Uses the KMeans clustering algorithm, which assumes that clusters are convex and isotropic, and may not work as 
intended if the true clusters in the data are not of this shape. """ - return px.histogram( - KMeans(n_clusters=num_clusters).fit(dataset.y_pred(model)).labels_, + embeddings = dataset.y_pred(model) + kmeans = KMeans(n_clusters=num_clusters).fit(embeddings) + labels = kmeans.labels_ + + fig = px.histogram( + labels, nbins=num_clusters, title="Embeddings Cluster Distribution", ) + + return fig, RawData(labels=labels) diff --git a/validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py b/validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py index 5f1c1a800..e38561db5 100644 --- a/validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py +++ b/validmind/tests/model_validation/embeddings/CosineSimilarityComparison.py @@ -9,7 +9,7 @@ import plotly.express as px from sklearn.metrics.pairwise import cosine_similarity -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -63,6 +63,7 @@ def CosineSimilarityComparison(dataset, models): figures = [] # Initialize a list to store data for the DataFrame all_stats = [] + similarity_matrices = [] # Generate all pairs of models for comparison for model_A, model_B in combinations(models, 2): @@ -73,6 +74,15 @@ def CosineSimilarityComparison(dataset, models): similarity_matrix = cosine_similarity(embeddings_A, embeddings_B) similarities = similarity_matrix.flatten() + # store similarity matrix + similarity_matrices.append( + { + "model_A": model_A.input_id, + "model_B": model_B.input_id, + "similarity_matrix": similarity_matrix, + } + ) + # Generate statistics and add model combination as a column stats_data = { "Combination": f"{model_A.input_id} vs {model_B.input_id}", @@ -100,4 +110,8 @@ def CosineSimilarityComparison(dataset, models): # Create a DataFrame from all collected statistics stats_df = pd.DataFrame(all_stats) - return (stats_df, *tuple(figures)) + return ( + *figures, + stats_df, + RawData(similarity_matrices=pd.DataFrame(similarity_matrices)), + ) diff --git a/validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py b/validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py index 7be1bd01f..484370d3e 100644 --- a/validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py +++ b/validmind/tests/model_validation/embeddings/CosineSimilarityDistribution.py @@ -5,7 +5,7 @@ import plotly.express as px from sklearn.metrics.pairwise import cosine_similarity -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,9 +52,11 @@ def CosineSimilarityDistribution(dataset: VMDataset, model: VMModel): - The output is sensitive to the choice of bin number for the histogram. Different bin numbers could give a slightly altered perspective on the distribution of cosine similarity. 
""" + similarity_scores = cosine_similarity(dataset.y_pred(model)).flatten() + return px.histogram( - x=cosine_similarity(dataset.y_pred(model)).flatten(), + x=similarity_scores, nbins=100, title="Cosine Similarity Distribution", labels={"x": "Cosine Similarity"}, - ) + ), RawData(similarity_scores=similarity_scores) diff --git a/validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py b/validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py index 94a2bb141..9f490187a 100644 --- a/validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py +++ b/validmind/tests/model_validation/embeddings/CosineSimilarityHeatmap.py @@ -6,7 +6,7 @@ import plotly.express as px from sklearn.metrics.pairwise import cosine_similarity -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -81,4 +81,4 @@ def CosineSimilarityHeatmap( yaxis_title=yaxis_title, ) - return fig + return fig, RawData(similarity_matrix=similarity_matrix) diff --git a/validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py b/validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py index a431da065..f2fd3aeba 100644 --- a/validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py +++ b/validmind/tests/model_validation/embeddings/DescriptiveAnalytics.py @@ -5,7 +5,7 @@ import numpy as np import plotly.express as px -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -55,17 +55,27 @@ def DescriptiveAnalytics(dataset: VMDataset, model: VMModel): - While it displays valuable information about the central tendency and spread of data, it does not provide information about correlations between different embedding dimensions. 
""" + y_pred = dataset.y_pred(model) + embedding_means = np.mean(y_pred, axis=0) + embedding_medians = np.median(y_pred, axis=0) + embedding_stds = np.std(y_pred, axis=0) + return ( px.histogram( - x=np.mean(dataset.y_pred(model), axis=0), + x=embedding_means, title="Distribution of Embedding Means", ), px.histogram( - x=np.median(dataset.y_pred(model), axis=0), + x=embedding_medians, title="Distribution of Embedding Medians", ), px.histogram( - x=np.std(dataset.y_pred(model), axis=0), + x=embedding_stds, title="Distribution of Embedding Standard Deviations", ), + RawData( + embedding_means=embedding_means, + embedding_medians=embedding_medians, + embedding_stds=embedding_stds, + ), ) diff --git a/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py b/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py index c8dea04dd..fc599f9cb 100644 --- a/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py +++ b/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py @@ -7,7 +7,7 @@ import plotly.express as px from sklearn.manifold import TSNE -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -89,4 +89,4 @@ def EmbeddingsVisualization2D( fig = px.scatter(**scatter_kwargs) fig.update_layout(width=500, height=500) - return fig + return fig, RawData(reduced_embeddings=reduced_embeddings) diff --git a/validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py b/validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py index 26509b4e7..666f34c3f 100644 --- a/validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py +++ b/validmind/tests/model_validation/embeddings/EuclideanDistanceComparison.py @@ -9,7 +9,7 @@ import plotly.express as px from sklearn.metrics.pairwise import euclidean_distances -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -57,6 +57,8 @@ def EuclideanDistanceComparison(dataset, models): figures = [] all_stats = [] + distance_matrices = {} + # Generate all pairs of models for comparison for model_A, model_B in combinations(models, 2): embeddings_A = np.stack(dataset.y_pred(model_A)) @@ -66,6 +68,15 @@ def EuclideanDistanceComparison(dataset, models): distance_matrix = euclidean_distances(embeddings_A, embeddings_B) distances = distance_matrix.flatten() + # Store raw distance matrix for each pair-wise comparison + distance_matrices.append( + { + "model_A": model_A.input_id, + "model_B": model_B.input_id, + "distance_matrix": distance_matrix, + } + ) + # Generate statistics and add model combination as a column stats_data = { "Combination": f"{model_A.input_id} vs {model_B.input_id}", @@ -93,4 +104,7 @@ def EuclideanDistanceComparison(dataset, models): # Create a DataFrame from all collected statistics stats_df = pd.DataFrame(all_stats) - return (stats_df, *tuple(figures)) + # Add raw data to return + raw_data = RawData(distance_matrices=pd.DataFrame(distance_matrices)) + + return (stats_df, *figures, raw_data) diff --git a/validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py b/validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py index ca2de207d..430b96f42 100644 --- a/validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py +++ 
b/validmind/tests/model_validation/embeddings/EuclideanDistanceHeatmap.py @@ -6,7 +6,7 @@ import plotly.express as px from sklearn.metrics.pairwise import euclidean_distances -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -79,4 +79,4 @@ def EuclideanDistanceHeatmap( yaxis_title=yaxis_title, ) - return fig + return fig, RawData(distance_matrix=distance_matrix) diff --git a/validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py b/validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py index e6d7d4421..10a4308a9 100644 --- a/validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py +++ b/validmind/tests/model_validation/embeddings/PCAComponentsPairwisePlots.py @@ -10,7 +10,7 @@ from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -74,7 +74,7 @@ def PCAComponentsPairwisePlots(dataset, model, n_components=3): ) # List to store each plot - plots = [] + figures = [] # Create plots for each pair of principal components for pc1, pc2 in itertools.combinations(range(1, n_components + 1), 2): @@ -88,7 +88,6 @@ def PCAComponentsPairwisePlots(dataset, model, n_components=3): f"PC{pc2}": f"Principal Component {pc2}", }, ) - plots.append(fig) + figures.append(fig) - # Return the list of plots as a tuple - return tuple(plots) + return (*figures, RawData(pca_results=pca_df)) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py index 06726ce52..86356b3de 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py @@ -5,7 +5,7 @@ import re from typing import Dict -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel from .utils import create_stability_analysis_result @@ -91,8 +91,10 @@ def perturb_data(data: str): perturb_data ) - return create_stability_analysis_result( + results = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, ) + + return results, RawData(perturbed_data=perturbed_df) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py index 603f1bba5..e845a4fb7 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py @@ -5,7 +5,7 @@ import random import string -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel from .utils import create_stability_analysis_result @@ -145,8 +145,10 @@ def perturb_data(data): perturb_data ) - return create_stability_analysis_result( + result = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, ) + + return result, RawData(perturbed_text_data=perturbed_df) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py index 078cb93d8..fbdbd7d0e 
100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py @@ -7,7 +7,7 @@ import nltk from nltk.corpus import wordnet as wn -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel from .utils import create_stability_analysis_result @@ -105,4 +105,4 @@ def perturb_data(data): dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, - ) + ), RawData(original_data=original_df, perturbed_data=perturbed_df) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py index 500c70c15..bb0697018 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py @@ -4,7 +4,7 @@ from transformers import MarianMTModel, MarianTokenizer -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -128,8 +128,10 @@ def perturb_data(data): perturb_data ) - return create_stability_analysis_result( + result = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, ) + + return result, RawData(original_data=original_df, perturbed_data=perturbed_df) diff --git a/validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py b/validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py index 4687a023f..374049647 100644 --- a/validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py +++ b/validmind/tests/model_validation/embeddings/TSNEComponentsPairwisePlots.py @@ -10,7 +10,7 @@ from sklearn.manifold import TSNE from sklearn.preprocessing import StandardScaler -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "dimensionality_reduction", "embeddings") @@ -80,7 +80,7 @@ def TSNEComponentsPairwisePlots( ) # List to store each plot - plots = [] + figures = [] # Create plots for each pair of t-SNE components (if n_components > 1) if n_components > 1: @@ -95,7 +95,7 @@ def TSNEComponentsPairwisePlots( f"Component {comp2}": f"Component {comp2}", }, ) - plots.append(fig) + figures.append(fig) else: fig = px.scatter( tsne_df, @@ -106,7 +106,9 @@ def TSNEComponentsPairwisePlots( "Component 1": "Component 1", }, ) - plots.append(fig) + figures.append(fig) - # Return the list of plots as a tuple - return tuple(plots) + return ( + *figures, + RawData(embeddings_scaled=embeddings_scaled, tsne_results=tsne_results), + ) diff --git a/validmind/tests/model_validation/ragas/AnswerCorrectness.py b/validmind/tests/model_validation/ragas/AnswerCorrectness.py index b63b33709..38bed7282 100644 --- a/validmind/tests/model_validation/ragas/AnswerCorrectness.py +++ b/validmind/tests/model_validation/ragas/AnswerCorrectness.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -144,4 +144,5 @@ def AnswerCorrectness( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/AspectCritic.py 
b/validmind/tests/model_validation/ragas/AspectCritic.py index 6a265bb76..3a3ee097a 100644 --- a/validmind/tests/model_validation/ragas/AspectCritic.py +++ b/validmind/tests/model_validation/ragas/AspectCritic.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -187,9 +187,13 @@ def AspectCritic( title="Aspect Critique Results", ) - return { - "Aspect Scores": [ - {"Aspect": aspect, "Score": result_df[aspect].mean()} - for aspect in aspects + [aspect.name for aspect in custom_aspects] - ] - }, fig + return ( + { + "Aspect Scores": [ + {"Aspect": aspect, "Score": result_df[aspect].mean()} + for aspect in aspects + [aspect.name for aspect in custom_aspects] + ] + }, + fig, + RawData(evaluation_results=result_df), + ) diff --git a/validmind/tests/model_validation/ragas/ContextEntityRecall.py b/validmind/tests/model_validation/ragas/ContextEntityRecall.py index 9f2faf630..52459877c 100644 --- a/validmind/tests/model_validation/ragas/ContextEntityRecall.py +++ b/validmind/tests/model_validation/ragas/ContextEntityRecall.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -143,4 +143,5 @@ def ContextEntityRecall( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/ContextPrecision.py b/validmind/tests/model_validation/ragas/ContextPrecision.py index cc183d128..97e0fd74e 100644 --- a/validmind/tests/model_validation/ragas/ContextPrecision.py +++ b/validmind/tests/model_validation/ragas/ContextPrecision.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -135,4 +135,5 @@ def ContextPrecision( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py b/validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py index 3c82d8f2c..307e1c36e 100644 --- a/validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py +++ b/validmind/tests/model_validation/ragas/ContextPrecisionWithoutReference.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -130,4 +130,5 @@ def ContextPrecisionWithoutReference( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/ContextRecall.py b/validmind/tests/model_validation/ragas/ContextRecall.py index 539b123f6..55f8ad941 100644 --- a/validmind/tests/model_validation/ragas/ContextRecall.py +++ b/validmind/tests/model_validation/ragas/ContextRecall.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import 
get_ragas_config, get_renamed_columns @@ -135,4 +135,5 @@ def ContextRecall( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/Faithfulness.py b/validmind/tests/model_validation/ragas/Faithfulness.py index e5331f559..d20dc5b05 100644 --- a/validmind/tests/model_validation/ragas/Faithfulness.py +++ b/validmind/tests/model_validation/ragas/Faithfulness.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -140,4 +140,5 @@ def Faithfulness( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/NoiseSensitivity.py b/validmind/tests/model_validation/ragas/NoiseSensitivity.py index 4584d35d8..f68677a25 100644 --- a/validmind/tests/model_validation/ragas/NoiseSensitivity.py +++ b/validmind/tests/model_validation/ragas/NoiseSensitivity.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -179,4 +179,5 @@ def NoiseSensitivity( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/ResponseRelevancy.py b/validmind/tests/model_validation/ragas/ResponseRelevancy.py index 56a836a83..29f3fd041 100644 --- a/validmind/tests/model_validation/ragas/ResponseRelevancy.py +++ b/validmind/tests/model_validation/ragas/ResponseRelevancy.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -154,4 +154,5 @@ def ResponseRelevancy( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/ragas/SemanticSimilarity.py b/validmind/tests/model_validation/ragas/SemanticSimilarity.py index 93062a37f..a79cd0bf9 100644 --- a/validmind/tests/model_validation/ragas/SemanticSimilarity.py +++ b/validmind/tests/model_validation/ragas/SemanticSimilarity.py @@ -7,7 +7,7 @@ import plotly.express as px from datasets import Dataset -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingDependencyError from .utils import get_ragas_config, get_renamed_columns @@ -133,4 +133,5 @@ def SemanticSimilarity( }, fig_histogram, fig_box, + RawData(evaluation_results=result_df), ) diff --git a/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py b/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py index 71fa45394..83edafd95 100644 --- a/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py +++ b/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py @@ -4,7 +4,7 @@ from sklearn.metrics import adjusted_mutual_info_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,11 +52,8 @@ def AdjustedMutualInformation(model: VMModel, dataset: VMDataset): - The interpretability of the score can be complex as it depends on the understanding of information 
theory concepts. """ - return [ - { - "Adjusted Mutual Information": adjusted_mutual_info_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + ami_score = adjusted_mutual_info_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + return [{"Adjusted Mutual Information": ami_score}], RawData(ami_score=ami_score) diff --git a/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py b/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py index 6fd3cb181..96138287f 100644 --- a/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py +++ b/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py @@ -4,7 +4,7 @@ from sklearn.metrics import adjusted_rand_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -49,11 +49,9 @@ def AdjustedRandIndex(model: VMModel, dataset: VMDataset): - It may be difficult to interpret the implications of an ARI score without context or a benchmark, as it is heavily dependent on the characteristics of the dataset used. """ - return [ - { - "Adjusted Rand Index": adjusted_rand_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + ari_score = adjusted_rand_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + + return [{"Adjusted Rand Index": ari_score}], RawData(ari_score=ari_score) diff --git a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py index 0a4d4f442..30abe7796 100644 --- a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py +++ b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py @@ -8,7 +8,7 @@ from plotly.subplots import make_subplots from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -255,4 +255,15 @@ def ClassifierThresholdOptimization( # Create results table and sort by threshold descending table = pd.DataFrame(results).sort_values("threshold", ascending=False) - return fig, table + return ( + fig, + table, + RawData( + roc_data={"fpr": fpr, "tpr": tpr, "thresholds_roc": thresholds_roc}, + pr_data={ + "precision": precision, + "recall": recall, + "thresholds_pr": thresholds_pr, + }, + ), + ) diff --git a/validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py b/validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py index fc55a439e..999625539 100644 --- a/validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py +++ b/validmind/tests/model_validation/sklearn/ClusterCosineSimilarity.py @@ -5,7 +5,7 @@ import numpy as np from sklearn.metrics.pairwise import cosine_similarity -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset, VMModel @@ -61,11 +61,14 @@ def ClusterCosineSimilarity(model: VMModel, dataset: VMDataset): table = [] + cluster_centroids = {} + for cluster_idx in range(num_clusters): cluster_data = dataset.x[y_pred == cluster_idx] if cluster_data.size != 0: cluster_centroid = np.mean(cluster_data, axis=0) + cluster_centroids[cluster_idx] = cluster_centroid table.append( { "Cluster": cluster_idx, @@ -81,4 +84,4 @@ def ClusterCosineSimilarity(model: VMModel, dataset: VMDataset): if not 
table: raise SkipTestError("No clusters found") - return table + return table, RawData(cluster_centroids=cluster_centroids) diff --git a/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py b/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py index e3d23ffe3..40f5fe57b 100644 --- a/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py +++ b/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py @@ -11,7 +11,7 @@ v_measure_score, ) -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel HOMOGENEITY = """ @@ -115,53 +115,56 @@ def ClusterPerformanceMetrics(model: VMModel, dataset: VMDataset): - Does not consider aspects like computational efficiency of the model or its capability to handle high dimensional data. """ + labels_true = dataset.y + labels_pred = dataset.y_pred(model) + return [ { "Metric": "Homogeneity Score", "Description": HOMOGENEITY, "Value": homogeneity_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, { "Metric": "Completeness Score", "Description": COMPLETENESS, "Value": completeness_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, { "Metric": "V Measure", "Description": V_MEASURE, "Value": v_measure_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, { "Metric": "Adjusted Rand Index", "Description": ADJUSTED_RAND_INDEX, "Value": adjusted_rand_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, { "Metric": "Adjusted Mutual Information", "Description": ADJUSTED_MUTUAL_INFORMATION, "Value": adjusted_mutual_info_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, { "Metric": "Fowlkes-Mallows score", "Description": FOULKES_MALLOWS_SCORE, "Value": fowlkes_mallows_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), + labels_true=labels_true, + labels_pred=labels_pred, ), }, - ] + ], RawData(labels_true=labels_true, labels_pred=labels_pred) diff --git a/validmind/tests/model_validation/sklearn/CompletenessScore.py b/validmind/tests/model_validation/sklearn/CompletenessScore.py index 26272822e..de5b916cf 100644 --- a/validmind/tests/model_validation/sklearn/CompletenessScore.py +++ b/validmind/tests/model_validation/sklearn/CompletenessScore.py @@ -4,7 +4,7 @@ from sklearn.metrics import completeness_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -47,11 +47,14 @@ def CompletenessScore(model: VMModel, dataset: VMDataset): - The Completeness Score only applies to clustering models; it cannot be used for other types of machine learning models. 
""" - return [ - { - "Completeness Score": completeness_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + labels_true = dataset.y + labels_pred = dataset.y_pred(model) + + completeness = completeness_score( + labels_true=labels_true, + labels_pred=labels_pred, + ) + + return [{"Completeness Score": completeness}], RawData( + labels_true=labels_true, labels_pred=labels_pred + ) diff --git a/validmind/tests/model_validation/sklearn/ConfusionMatrix.py b/validmind/tests/model_validation/sklearn/ConfusionMatrix.py index 6d1db3f41..050b70f54 100644 --- a/validmind/tests/model_validation/sklearn/ConfusionMatrix.py +++ b/validmind/tests/model_validation/sklearn/ConfusionMatrix.py @@ -7,7 +7,7 @@ import plotly.figure_factory as ff from sklearn.metrics import confusion_matrix -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -119,4 +119,4 @@ def ConfusionMatrix(dataset: VMDataset, model: VMModel): font=dict(size=14), ) - return fig + return fig, RawData(confusion_matrix=cm) diff --git a/validmind/tests/model_validation/sklearn/FeatureImportance.py b/validmind/tests/model_validation/sklearn/FeatureImportance.py index 677b62017..b73da1c00 100644 --- a/validmind/tests/model_validation/sklearn/FeatureImportance.py +++ b/validmind/tests/model_validation/sklearn/FeatureImportance.py @@ -5,7 +5,7 @@ import pandas as pd from sklearn.inspection import permutation_importance -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -91,4 +91,5 @@ def FeatureImportance(dataset: VMDataset, model: VMModel, num_features: int = 3) # Convert the results list to a DataFrame results_df = pd.DataFrame(results_list) - return results_df + + return results_df, RawData(permutation_importance_scores=pfi_values) diff --git a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py index 04b9b08b3..ff852432f 100644 --- a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py +++ b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py @@ -52,11 +52,11 @@ def FowlkesMallowsScore(dataset: VMDataset, model: VMModel): - It does not handle mismatching numbers of clusters between the true and predicted labels. As such, it may return misleading results if the predicted labels suggest a different number of clusters than what is in the true labels. 
""" - return [ - { - "Fowlkes-Mallows score": metrics.fowlkes_mallows_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + score = metrics.fowlkes_mallows_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + + return [{"Fowlkes-Mallows score": score}], RawData( + true_labels=dataset.y, predicted_labels=dataset.y_pred(model) + ) diff --git a/validmind/tests/model_validation/sklearn/HomogeneityScore.py b/validmind/tests/model_validation/sklearn/HomogeneityScore.py index 1fedd83bd..fd595ac07 100644 --- a/validmind/tests/model_validation/sklearn/HomogeneityScore.py +++ b/validmind/tests/model_validation/sklearn/HomogeneityScore.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -50,11 +50,13 @@ class labels of the training and testing sets with the labels predicted by the g - The score does not address the actual number of clusters formed, or the evenness of cluster sizes. It only checks the homogeneity within the given clusters created by the model. """ - return [ - { - "Homogeneity Score": metrics.homogeneity_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + + homogeneity_score = metrics.homogeneity_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + + return ( + [{"Homogeneity Score": homogeneity_score}], + RawData(labels_true=dataset.y, labels_pred=dataset.y_pred(model)), + ) diff --git a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py index dd90a44af..4e8cc34d4 100644 --- a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py +++ b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py @@ -7,7 +7,7 @@ from sklearn.metrics import make_scorer, recall_score from sklearn.model_selection import GridSearchCV -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -135,6 +135,8 @@ def HyperParametersTuning( metrics = _get_metrics(scoring) thresholds = _get_thresholds(thresholds) + raw_data = {} + for threshold in thresholds: scoring_dict = _create_scoring_dict(scoring, metrics, threshold) @@ -162,4 +164,8 @@ def HyperParametersTuning( results.append(row_result) - return results + # Store intermediate data for each (optimize_for, threshold) combination + raw_data_key = f"{optimize_for}_threshold_{threshold}" + raw_data[raw_data_key] = estimators.cv_results_ + + return results, RawData(grid_search_results=raw_data) diff --git a/validmind/tests/model_validation/sklearn/KMeansClustersOptimization.py b/validmind/tests/model_validation/sklearn/KMeansClustersOptimization.py index 9fa715e02..f26bddb6a 100644 --- a/validmind/tests/model_validation/sklearn/KMeansClustersOptimization.py +++ b/validmind/tests/model_validation/sklearn/KMeansClustersOptimization.py @@ -11,7 +11,7 @@ from sklearn import clone from sklearn.metrics import silhouette_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset, VMModel @@ -124,4 +124,4 @@ def KMeansClustersOptimization( fig.update_layout(showlegend=False) - return fig + return fig, RawData(distortions=distortions, silhouette_avg=silhouette_avg) diff --git a/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py 
b/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py index fd55ff8ec..f4a00c55c 100644 --- a/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py +++ b/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py @@ -6,6 +6,7 @@ from sklearn.metrics import roc_auc_score from sklearn.preprocessing import LabelBinarizer +from validmind import RawData from validmind.tests import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -62,19 +63,26 @@ def MinimumROCAUCScore(dataset: VMDataset, model: VMModel, min_threshold: float lb = LabelBinarizer() lb.fit(y_true) + y_true_binarized = lb.transform(y_true) + y_score_binarized = lb.transform(dataset.y_pred(model)) + roc_auc = roc_auc_score( - y_true=lb.transform(y_true), - y_score=lb.transform(dataset.y_pred(model)), + y_true=y_true_binarized, + y_score=y_score_binarized, average="macro", ) else: - roc_auc = roc_auc_score(y_true=y_true, y_score=dataset.y_prob(model)) + y_score_prob = dataset.y_prob(model) + roc_auc = roc_auc_score(y_true=y_true, y_score=y_score_prob) - return [ + table = [ { "Score": roc_auc, "Threshold": min_threshold, "Pass/Fail": "Pass" if roc_auc > min_threshold else "Fail", } - ], roc_auc > min_threshold + ] + pass_fail = roc_auc > min_threshold + + return table, pass_fail, RawData(y_true=y_true, roc_auc=roc_auc) diff --git a/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py b/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py index 65cb29d75..96506801d 100644 --- a/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +++ b/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py @@ -10,7 +10,7 @@ import seaborn as sns from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -242,7 +242,7 @@ def OverfitDiagnosis( test_df[prob_column] = datasets[1].y_prob(model) test_results = [] - test_figures = [] + figures = [] results_headers = ["slice", "shape", "feature", metric] for feature_column in datasets[0].feature_columns: @@ -283,7 +283,7 @@ def OverfitDiagnosis( ) results = _prepare_results(results_train, results_test, metric) - test_figures.append( + figures.append( _plot_overfit_regions(results, feature_column, cut_off_threshold, metric) ) @@ -299,4 +299,8 @@ def OverfitDiagnosis( } ) - return {"Overfit Diagnosis": test_results}, *test_figures + return ( + {"Overfit Diagnosis": test_results}, + *figures, + RawData(train_metrics=results_train, test_metrics=results_test), + ) diff --git a/validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py b/validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py index 81be11049..f65f8955e 100644 --- a/validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py +++ b/validmind/tests/model_validation/sklearn/PermutationFeatureImportance.py @@ -7,7 +7,7 @@ import plotly.graph_objects as go from sklearn.inspection import permutation_importance -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -111,4 +111,4 @@ def PermutationFeatureImportance( height=figure_height, ) - return fig + return fig, RawData(permutation_importance=pfi_values) diff --git a/validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py 
b/validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py index 8fb9fb473..c6be015f5 100644 --- a/validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py +++ b/validmind/tests/model_validation/sklearn/PopulationStabilityIndex.py @@ -8,7 +8,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -192,18 +192,22 @@ def PopulationStabilityIndex( table_title = f"Population Stability Index for {datasets[0].input_id} and {datasets[1].input_id} Datasets" - return { - table_title: [ - { - "Bin": ( - i if i < (len(psi_results) - 1) else "Total" - ), # The last bin is the "Total" bin - "Count Initial": values["initial"], - "Percent Initial (%)": values["percent_initial"] * 100, - "Count New": values["new"], - "Percent New (%)": values["percent_new"] * 100, - "PSI": values["psi"], - } - for i, values in enumerate(psi_results) - ], - }, fig + return ( + { + table_title: [ + { + "Bin": ( + i if i < (len(psi_results) - 1) else "Total" + ), # The last bin is the "Total" bin + "Count Initial": values["initial"], + "Percent Initial (%)": values["percent_initial"] * 100, + "Count New": values["new"], + "Percent New (%)": values["percent_new"] * 100, + "PSI": values["psi"], + } + for i, values in enumerate(psi_results) + ], + }, + fig, + RawData(psi_raw=psi_results), + ) diff --git a/validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py b/validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py index 2185921da..8a722fa8b 100644 --- a/validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py +++ b/validmind/tests/model_validation/sklearn/PrecisionRecallCurve.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from sklearn.metrics import precision_recall_curve -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.models import FoundationModel from validmind.vm_models import VMDataset, VMModel @@ -66,7 +66,7 @@ def PrecisionRecallCurve(model: VMModel, dataset: VMDataset): precision, recall, _ = precision_recall_curve(y_true, dataset.y_prob(model)) - return go.Figure( + fig = go.Figure( data=[ go.Scatter( x=recall, @@ -82,3 +82,5 @@ def PrecisionRecallCurve(model: VMModel, dataset: VMDataset): yaxis=dict(title="Precision"), ), ) + + return fig, RawData(precision=precision, recall=recall) diff --git a/validmind/tests/model_validation/sklearn/ROCCurve.py b/validmind/tests/model_validation/sklearn/ROCCurve.py index 7113d0bc1..cb7314953 100644 --- a/validmind/tests/model_validation/sklearn/ROCCurve.py +++ b/validmind/tests/model_validation/sklearn/ROCCurve.py @@ -78,7 +78,6 @@ def ROCCurve(model: VMModel, dataset: VMDataset): auc = roc_auc_score(y_true, y_prob) return ( - RawData(fpr=fpr, tpr=tpr, auc=auc), go.Figure( data=[ go.Scatter( @@ -104,4 +103,5 @@ def ROCCurve(model: VMModel, dataset: VMDataset): height=500, ), ), + RawData(fpr=fpr, tpr=tpr, auc=auc), ) diff --git a/validmind/tests/model_validation/sklearn/RegressionErrors.py b/validmind/tests/model_validation/sklearn/RegressionErrors.py index f398d6d21..a819f1737 100644 --- a/validmind/tests/model_validation/sklearn/RegressionErrors.py +++ b/validmind/tests/model_validation/sklearn/RegressionErrors.py @@ -6,7 +6,7 @@ import pandas as pd from sklearn import metrics -from validmind import tags, tasks +from 
validmind import RawData, tags, tasks @tags("sklearn", "model_performance") @@ -83,4 +83,4 @@ def _regression_errors(y_true, y_pred): } ) - return results_df + return results_df, RawData(y_true=y_true, y_pred=y_pred) diff --git a/validmind/tests/model_validation/sklearn/RegressionPerformance.py b/validmind/tests/model_validation/sklearn/RegressionPerformance.py index 2ffdbd6cf..d389ce96d 100644 --- a/validmind/tests/model_validation/sklearn/RegressionPerformance.py +++ b/validmind/tests/model_validation/sklearn/RegressionPerformance.py @@ -5,7 +5,7 @@ import numpy as np from sklearn.metrics import mean_absolute_error, mean_squared_error -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -80,4 +80,4 @@ def RegressionPerformance(model: VMModel, dataset: VMDataset): "Value": value, } for metric, value in metrics.items() - ] + ], RawData(y_true=y_true, y_pred=y_pred) diff --git a/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py b/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py index 5e5eb2fdd..f5990dbff 100644 --- a/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py +++ b/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py @@ -5,7 +5,7 @@ import pandas as pd from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.tests.model_validation.statsmodels.statsutils import adj_r2_score @@ -79,4 +79,4 @@ def RegressionR2SquareComparison(datasets, models): # Convert results list to a DataFrame results_df = pd.DataFrame(results_list) - return results_df + return results_df, RawData(r2_values=results_df) diff --git a/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py b/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py index f6d855f1a..b4a36635f 100644 --- a/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +++ b/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py @@ -9,7 +9,7 @@ import numpy as np import shap -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import UnsupportedModelForSHAPError from validmind.logging import get_logger from validmind.models import CatBoostModel, SKlearnModel, StatsModelsModel @@ -229,4 +229,5 @@ def SHAPGlobalImportance( return ( generate_shap_plot("mean", shap_values, shap_sample), generate_shap_plot("summary", shap_values, shap_sample), + RawData(shap_values=shap_values), ) diff --git a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py index 7246ca573..28e86f900 100644 --- a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py +++ b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -128,4 +128,4 @@ def ScoreProbabilityAlignment( height=600, ) - return results_df, fig + return results_df, fig, RawData(score_bin_data=df) diff --git a/validmind/tests/model_validation/sklearn/SilhouettePlot.py b/validmind/tests/model_validation/sklearn/SilhouettePlot.py index b95d4c599..836c8791b 100644 --- a/validmind/tests/model_validation/sklearn/SilhouettePlot.py +++ 
b/validmind/tests/model_validation/sklearn/SilhouettePlot.py @@ -6,7 +6,7 @@ import numpy as np from sklearn.metrics import silhouette_samples, silhouette_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -105,8 +105,10 @@ def SilhouettePlot(model: VMModel, dataset: VMDataset): plt.close() - return [ + return ( { "Silhouette Score": silhouette_avg, }, - ], fig + fig, + RawData(sample_silhouette_values=sample_silhouette_values), + ) diff --git a/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py b/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py index d64ce5294..61b6313a3 100644 --- a/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py +++ b/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py @@ -7,6 +7,7 @@ from numpy import unique from sklearn.metrics import classification_report +from validmind import RawData from validmind.tests import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -98,4 +99,11 @@ def TrainingTestDegradation( } ) - return table, all(row["Pass/Fail"] == "Pass" for row in table) + return ( + table, + all(row["Pass/Fail"] == "Pass" for row in table), + RawData( + train_classification_report=ds1_report, + test_classification_report=ds2_report, + ), + ) diff --git a/validmind/tests/model_validation/sklearn/VMeasure.py b/validmind/tests/model_validation/sklearn/VMeasure.py index 2a86c7390..79cb569ee 100644 --- a/validmind/tests/model_validation/sklearn/VMeasure.py +++ b/validmind/tests/model_validation/sklearn/VMeasure.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -48,11 +48,12 @@ def VMeasure(dataset: VMDataset, model: VMModel): the other. The V Measure Score does not provide flexibility in assigning different weights to homogeneity and completeness. 
""" - return [ - { - "V Measure": metrics.v_measure_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - } - ] + v_measure = metrics.v_measure_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + + # Store raw data needed to recalculate V Measure + raw_data = RawData(labels_true=dataset.y, labels_pred=dataset.y_pred(model)) + + return [{"V Measure": v_measure}], raw_data diff --git a/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py b/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py index 309226d1a..63838adec 100644 --- a/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +++ b/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py @@ -9,6 +9,7 @@ import seaborn as sns from sklearn import metrics +from validmind import RawData from validmind.tests import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -294,4 +295,8 @@ def WeakspotsDiagnosis( ).sort_values(["Feature", "Dataset"]), *figures, passed, + RawData( + metrics_results_training=pd.DataFrame(results_1), + metrics_results_test=pd.DataFrame(results_2), + ), ) diff --git a/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py b/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py index 48fe9c878..0ad422278 100644 --- a/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py +++ b/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from matplotlib import cm -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "credit_risk") @@ -64,7 +64,7 @@ def CumulativePredictionProbabilities(dataset, model, title="Cumulative Probabil fig = _plot_cumulative_prob(df, dataset.target_column, title) - return fig + return fig, RawData(probabilities_df=df) def _plot_cumulative_prob(df, target_col, title): diff --git a/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py b/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py index 72fafdbd4..a252aa336 100644 --- a/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +++ b/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.stattools import durbin_watson -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tasks("regression") @@ -83,4 +83,4 @@ def get_autocorrelation(dw_value, threshold): } ) - return results + return results, RawData(residuals=residuals) diff --git a/validmind/tests/model_validation/statsmodels/GINITable.py b/validmind/tests/model_validation/statsmodels/GINITable.py index 6089f4b9b..fb476e35e 100644 --- a/validmind/tests/model_validation/statsmodels/GINITable.py +++ b/validmind/tests/model_validation/statsmodels/GINITable.py @@ -6,7 +6,7 @@ import pandas as pd from sklearn.metrics import roc_auc_score, roc_curve -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("model_performance") @@ -84,4 +84,5 @@ def GINITable(dataset, model): # Create a DataFrame to store and return the results metrics_df = pd.DataFrame(metrics_dict) - return metrics_df + + return metrics_df, RawData(true_positive_rate=tpr, false_positive_rate=fpr) diff --git a/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py b/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py index f4042d2c7..ea563f141 100644 --- 
a/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py +++ b/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py @@ -4,7 +4,7 @@ from statsmodels.stats.diagnostic import kstest_normal -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import InvalidTestParametersError from validmind.vm_models import VMDataset, VMModel @@ -66,4 +66,4 @@ def KolmogorovSmirnov(model: VMModel, dataset: VMDataset, dist: str = "norm"): "P-Value": result["pvalue"], } for k, result in ks_values.items() - ] + ], RawData(ks_results=ks_values) diff --git a/validmind/tests/model_validation/statsmodels/Lilliefors.py b/validmind/tests/model_validation/statsmodels/Lilliefors.py index e5dc73fd3..fc1c1f029 100644 --- a/validmind/tests/model_validation/statsmodels/Lilliefors.py +++ b/validmind/tests/model_validation/statsmodels/Lilliefors.py @@ -4,7 +4,7 @@ from statsmodels.stats.diagnostic import lilliefors -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -70,4 +70,4 @@ def Lilliefors(model: VMModel, dataset: VMDataset): } ) - return table + return table, RawData(test_statistics=table) diff --git a/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py b/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py index 964ab5aca..db4f2b152 100644 --- a/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py +++ b/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from matplotlib import cm -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "credit_risk") @@ -60,7 +60,7 @@ def PredictionProbabilitiesHistogram( fig = _plot_prob_histogram(df, dataset.target_column, title) - return fig + return fig, RawData(probabilities_df=df) def _plot_prob_histogram(df, target_col, title): diff --git a/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py b/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py index 789a6b31b..f90070719 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py +++ b/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py @@ -7,7 +7,7 @@ import plotly.graph_objects as go from scipy import stats -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError @@ -97,4 +97,4 @@ def RegressionCoeffs(model): yaxis_title="Coefficients", ) - return (fig, coefficients) + return fig, RawData(coefficients=coefficients) diff --git a/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py b/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py index 16ae7e639..4711dced7 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py +++ b/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import seaborn as sns -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMModel @@ -90,4 +90,6 @@ def RegressionFeatureSignificance( plt.close() - return fig + raw_data = RawData(coefficients=coefficients, pvalues=pvalues) + + return fig, raw_data diff --git 
a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py index 820202f16..841e9660c 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py @@ -7,7 +7,7 @@ import matplotlib.pyplot as plt import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -87,4 +87,12 @@ def RegressionModelForecastPlot( plt.close() - return fig + # Prepare raw data + raw_data = RawData( + observed_values=pd.DataFrame({"index": index, "observed": dataset.y}), + forecasted_values=pd.DataFrame( + {"index": index, "forecast": dataset.y_pred(model)} + ), + ) + + return fig, raw_data diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py index 84ece1dba..04bbeb4fa 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -91,4 +91,7 @@ def RegressionModelForecastPlotLevels( plt.close() - return fig + return fig, RawData( + dataset_y_transformed=dataset_y_transformed, + y_pred_transformed=y_pred_transformed, + ) diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py b/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py index 123b1b157..f022e9d39 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py @@ -7,7 +7,7 @@ import matplotlib.pyplot as plt import numpy as np -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -111,4 +111,4 @@ def RegressionModelSensitivityPlot( plt.close() - return fig + return fig, RawData(shocked_dfs=shocked_dfs, predictions=predictions) diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py b/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py index f0daac967..7a374a198 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py @@ -4,7 +4,7 @@ from sklearn.metrics import mean_squared_error, r2_score -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel from .statsutils import adj_r2_score @@ -48,7 +48,7 @@ def RegressionModelSummary(dataset: VMDataset, model: VMModel): y_true = dataset.y y_pred = dataset.y_pred(model) - return [ + results = [ { "Independent Variables": dataset.feature_columns, "R-Squared": r2_score(y_true, y_pred), @@ -59,3 +59,5 @@ def RegressionModelSummary(dataset: VMDataset, model: VMModel): "RMSE": mean_squared_error(y_true=y_true, y_pred=y_pred, squared=False), } ] + + return results, RawData(y_true=y_true, y_pred=y_pred) diff --git 
a/validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py b/validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py index ea6954227..cb314ab0b 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py +++ b/validmind/tests/model_validation/statsmodels/RegressionPermutationFeatureImportance.py @@ -8,7 +8,7 @@ from sklearn.metrics import r2_score from sklearn.utils import check_random_state -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -97,4 +97,4 @@ def RegressionPermutationFeatureImportance( height=figure_height, ) - return fig + return fig, RawData(importances=importances) diff --git a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py index 29678e5af..74141cb86 100644 --- a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +++ b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py @@ -2,10 +2,11 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial +import pandas as pd import plotly.graph_objects as go from matplotlib import cm -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization", "credit_risk", "logistic_regression") @@ -66,7 +67,7 @@ def ScorecardHistogram(dataset, title="Histogram of Scores", score_column="score fig = _plot_score_histogram(df, score_column, dataset.target_column, title) - return fig + return fig, RawData(score_data=df[[score_column, dataset.target_column]]) def _plot_score_histogram(df, score_col, target_col, title): diff --git a/validmind/tests/ongoing_monitoring/FeatureDrift.py b/validmind/tests/ongoing_monitoring/FeatureDrift.py index 771e2a186..acee32df2 100644 --- a/validmind/tests/ongoing_monitoring/FeatureDrift.py +++ b/validmind/tests/ongoing_monitoring/FeatureDrift.py @@ -7,7 +7,7 @@ import numpy as np import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization") @@ -109,7 +109,11 @@ def nest(d: dict) -> dict: final_psi = pd.DataFrame(psi_table) - return (final_psi, *save_fig) + return ( + final_psi, + *save_fig, + RawData(psi_quantiles=PSI_QUANTILES, psi_bucket_frac=PSI_BUCKET_FRAC), + ) def get_psi_buckets(x_test_df, x_train_df, feature_columns, bins, PSI_QUANTILES): diff --git a/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py b/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py index 87f059e75..0e4035e2e 100644 --- a/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py +++ b/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization") @@ -56,7 +56,7 @@ def PredictionAcrossEachFeature(datasets, model): df_reference = datasets[0]._df df_monitoring = datasets[1]._df - figures_to_save = [] + figures = [] for column in df_reference: prediction_prob_column = f"{model.input_id}_probabilities" prediction_column = f"{model.input_id}_prediction" @@ -76,7 +76,7 @@ def PredictionAcrossEachFeature(datasets, model): ax2.set_title("Monitoring") ax2.set_xlabel(column) - figures_to_save.append(fig) + figures.append(fig) plt.close() - return 
tuple(figures_to_save) + return (*figures, RawData(df_reference=df_reference, df_monitoring=df_monitoring)) diff --git a/validmind/tests/ongoing_monitoring/PredictionCorrelation.py b/validmind/tests/ongoing_monitoring/PredictionCorrelation.py index 547425c33..8596b73e6 100644 --- a/validmind/tests/ongoing_monitoring/PredictionCorrelation.py +++ b/validmind/tests/ongoing_monitoring/PredictionCorrelation.py @@ -6,7 +6,7 @@ import matplotlib.pyplot as plt import numpy as np -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization") @@ -98,4 +98,9 @@ def PredictionCorrelation(datasets, model): corr_final = corr_final[ ["Features", "Reference Predictions", "Monitoring Predictions"] ] - return ({"Correlation Pair Table": corr_final}, fig) + + return ( + {"Correlation Pair Table": corr_final}, + fig, + RawData(reference_corr=df_corr, monitoring_corr=df_corr2), + ) diff --git a/validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py b/validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py index e57f9302d..dc7e234de 100644 --- a/validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py +++ b/validmind/tests/ongoing_monitoring/TargetPredictionDistributionPlot.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import seaborn as sns -from validmind import tags, tasks +from validmind import RawData, tags, tasks @tags("visualization") @@ -64,4 +64,6 @@ def TargetPredictionDistributionPlot(datasets, model): plt.close() - return fig + return fig, RawData( + reference_predictions=pred_ref, monitoring_predictions=pred_monitor + ) diff --git a/validmind/tests/prompt_validation/Bias.py b/validmind/tests/prompt_validation/Bias.py index 7ce386531..b5cc6b552 100644 --- a/validmind/tests/prompt_validation/Bias.py +++ b/validmind/tests/prompt_validation/Bias.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -107,11 +107,13 @@ def Bias(model, min_threshold=7): passed = score > min_threshold - return [ + return ( { "Score": score, "Explanation": explanation, "Threshold": min_threshold, "Pass/Fail": "Pass" if passed else "Fail", - } - ], passed + }, + passed, + RawData(response=response), + ) diff --git a/validmind/tests/prompt_validation/Clarity.py b/validmind/tests/prompt_validation/Clarity.py index d106414f5..44123539f 100644 --- a/validmind/tests/prompt_validation/Clarity.py +++ b/validmind/tests/prompt_validation/Clarity.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. 
# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -96,11 +96,13 @@ def Clarity(model, min_threshold=7): passed = score > min_threshold - return [ + table = [ { "Score": score, "Explanation": explanation, "Threshold": min_threshold, "Pass/Fail": "Pass" if passed else "Fail", } - ], passed + ] + + return (table, passed, RawData(response=response)) diff --git a/validmind/tests/prompt_validation/Conciseness.py b/validmind/tests/prompt_validation/Conciseness.py index 782746dcb..e2c295a3c 100644 --- a/validmind/tests/prompt_validation/Conciseness.py +++ b/validmind/tests/prompt_validation/Conciseness.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -103,11 +103,15 @@ def Conciseness(model, min_threshold=7): passed = score > min_threshold - return [ - { - "Score": score, - "Threshold": min_threshold, - "Explanation": explanation, - "Pass/Fail": "Pass" if passed else "Fail", - } - ], passed + return ( + [ + { + "Score": score, + "Threshold": min_threshold, + "Explanation": explanation, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], + passed, + RawData(response=response), + ) diff --git a/validmind/tests/prompt_validation/Delimitation.py b/validmind/tests/prompt_validation/Delimitation.py index a7dca8ad0..a91884092 100644 --- a/validmind/tests/prompt_validation/Delimitation.py +++ b/validmind/tests/prompt_validation/Delimitation.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -89,11 +89,15 @@ def Delimitation(model, min_threshold=7): passed = score > min_threshold - return [ - { - "Score": score, - "Threshold": min_threshold, - "Explanation": explanation, - "Pass/Fail": "Pass" if passed else "Fail", - } - ], passed + return ( + [ + { + "Score": score, + "Threshold": min_threshold, + "Explanation": explanation, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], + passed, + RawData(response=response), + ) diff --git a/validmind/tests/prompt_validation/NegativeInstruction.py b/validmind/tests/prompt_validation/NegativeInstruction.py index a2cd9331a..efe2e76ac 100644 --- a/validmind/tests/prompt_validation/NegativeInstruction.py +++ b/validmind/tests/prompt_validation/NegativeInstruction.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. 
# SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -115,4 +115,4 @@ def NegativeInstruction(model, min_threshold=7): } ] - return result, passed + return result, passed, RawData(model_response=response) diff --git a/validmind/tests/prompt_validation/Robustness.py b/validmind/tests/prompt_validation/Robustness.py index 783306f04..7216b1ecd 100644 --- a/validmind/tests/prompt_validation/Robustness.py +++ b/validmind/tests/prompt_validation/Robustness.py @@ -4,7 +4,7 @@ import pandas as pd -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError, SkipTestError from .ai_powered_test import call_model, missing_prompt_message @@ -127,4 +127,8 @@ def Robustness(model, dataset, num_tests=10): for generated_input, response in zip(generated_inputs, responses) ] - return results, all(result["Pass/Fail"] == "Pass" for result in results) + return ( + results, + all(result["Pass/Fail"] == "Pass" for result in results), + RawData(generated_inputs=generated_inputs, responses=responses), + ) diff --git a/validmind/tests/prompt_validation/Specificity.py b/validmind/tests/prompt_validation/Specificity.py index f7a2b04ed..7c1999a32 100644 --- a/validmind/tests/prompt_validation/Specificity.py +++ b/validmind/tests/prompt_validation/Specificity.py @@ -2,7 +2,7 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.errors import MissingRequiredTestInputError from .ai_powered_test import ( @@ -103,11 +103,15 @@ def Specificity(model, min_threshold=7): passed = score > min_threshold - return [ - { - "Score": score, - "Threshold": min_threshold, - "Explanation": explanation, - "Pass/Fail": "Pass" if passed else "Fail", - } - ], passed + return ( + [ + { + "Score": score, + "Threshold": min_threshold, + "Explanation": explanation, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], + passed, + RawData(response=response), + ) From b7d41c5910bf55758ce3d472aa1cd8adbd51d182 Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 15:02:54 -0500 Subject: [PATCH 05/13] feat: bulk updating unit tests to work with new raw data returned by tests --- scripts/bulk_unit_tests_updates.py | 273 ++++++++ tests/test_integration_tests.py | 604 +++++++++--------- .../data_validation/nlp/test_CommonWords.py | 12 +- .../nlp/test_LanguageDetection.py | 21 +- .../nlp/test_PolarityAndSubjectivity.py | 10 +- .../data_validation/nlp/test_Punctuations.py | 37 +- .../data_validation/nlp/test_Sentiment.py | 17 +- .../data_validation/nlp/test_Toxicity.py | 18 +- .../data_validation/test_ACFandPACFPlot.py | 22 +- .../unit_tests/data_validation/test_AutoMA.py | 12 +- .../test_BivariateScatterPlots.py | 6 +- .../data_validation/test_BoxPierce.py | 10 +- .../data_validation/test_ClassImbalance.py | 11 +- .../test_DatasetDescription.py | 32 +- .../data_validation/test_DatasetSplit.py | 21 +- .../data_validation/test_DickeyFullerGLS.py | 8 +- .../test_FeatureTargetCorrelationPlot.py | 17 +- .../data_validation/test_HighCardinality.py | 9 +- .../test_HighPearsonCorrelation.py | 7 +- .../test_IQROutliersBarPlot.py | 25 +- .../test_IsolationForestOutliers.py | 11 +- .../data_validation/test_JarqueBera.py | 9 +- 
tests/unit_tests/data_validation/test_KPSS.py | 10 +- .../data_validation/test_LJungBox.py | 6 +- .../test_LaggedCorrelationHeatmap.py | 19 +- .../test_MissingValuesBarPlot.py | 10 +- .../test_PearsonCorrelationMatrix.py | 7 +- .../data_validation/test_RollingStatsPlot.py | 14 +- .../data_validation/test_RunsTest.py | 7 +- .../data_validation/test_ScatterPlot.py | 11 +- .../data_validation/test_SeasonalDecompose.py | 25 +- .../data_validation/test_ShapiroWilk.py | 26 +- .../data_validation/test_Skewness.py | 6 +- .../data_validation/test_SpreadPlot.py | 9 +- .../test_TabularCategoricalBarPlots.py | 7 + .../test_TabularDateTimeHistograms.py | 5 +- .../test_TargetRateBarPlots.py | 14 +- .../test_TimeSeriesFrequency.py | 4 +- .../test_TimeSeriesHistogram.py | 14 +- .../test_TimeSeriesLinePlot.py | 14 +- .../test_TimeSeriesMissingValues.py | 6 +- .../test_TimeSeriesOutliers.py | 4 +- .../data_validation/test_WOEBinPlots.py | 18 +- .../data_validation/test_WOEBinTable.py | 7 +- .../data_validation/test_ZivotAndrewsArch.py | 6 +- .../ragas/test_AnswerCorrectness.py | 14 +- .../ragas/test_ContextEntityRecall.py | 5 +- .../ragas/test_ContextPrecision.py | 5 +- .../test_ContextPrecisionWithoutReference.py | 6 +- .../ragas/test_ContextRecall.py | 6 +- .../ragas/test_Faithfulness.py | 6 +- .../ragas/test_NoiseSensitivity.py | 6 +- .../ragas/test_ResponseRelevancy.py | 5 +- .../ragas/test_SemanticSimilarity.py | 6 +- .../sklearn/test_FeatureImportance.py | 26 +- .../sklearn/test_RegressionErrors.py | 57 +- .../test_RegressionR2SquareComparison.py | 34 +- .../test_CumulativePredictionProbabilities.py | 34 +- .../statsmodels/test_DurbinWatsonTest.py | 35 +- .../statsmodels/test_GINITable.py | 14 +- .../test_PredictionProbabilitiesHistogram.py | 13 +- .../statsmodels/test_RegressionCoeffs.py | 64 +- .../statsmodels/test_ScorecardHistogram.py | 25 +- .../model_validation/test_BertScore.py | 14 +- .../model_validation/test_BleuScore.py | 9 +- .../model_validation/test_ContextualRecall.py | 8 +- .../model_validation/test_MeteorScore.py | 8 +- .../test_ModelPredictionResiduals.py | 76 ++- .../model_validation/test_RegardScore.py | 9 +- .../model_validation/test_RougeScore.py | 34 +- .../test_TimeSeriesPredictionWithCI.py | 16 +- .../test_TimeSeriesPredictionsPlot.py | 14 +- .../test_TimeSeriesR2SquareBySegments.py | 9 +- .../model_validation/test_TokenDisparity.py | 5 +- .../model_validation/test_ToxicityScore.py | 8 +- .../tests/data_validation/DatasetSplit.py | 4 +- 76 files changed, 1278 insertions(+), 738 deletions(-) create mode 100644 scripts/bulk_unit_tests_updates.py diff --git a/scripts/bulk_unit_tests_updates.py b/scripts/bulk_unit_tests_updates.py new file mode 100644 index 000000000..c7ddfd912 --- /dev/null +++ b/scripts/bulk_unit_tests_updates.py @@ -0,0 +1,273 @@ +"""This script updates all unit tests for the ValidMind tests + +Ensure that the tests to be updated are working properly since this will overwrite the existing unit tests +to expect whatever is returned from the test as the source of truth. + +To just update the unit tests if there have been changes to the tests, run with the --update-only flag. + +To create new unit tests and update existing unit tests, run without the --update-only flag. 
+ +Example: +```bash +# create a new unit test for a test called UniqueValues +python scripts/bulk_unit_tests_updates.py validmind/tests/data_validation/UniqueValues.py + +# update existing and create new unit tests for a test directory +python scripts/bulk_unit_tests_updates.py validmind/tests/data_validation/ + +# update existing tests only +python scripts/bulk_unit_tests_updates.py validmind/tests/data_validation/ --update-only +``` +""" + +import os +import subprocess + +import click +from dotenv import load_dotenv +from openai import OpenAI + +load_dotenv() + +UNIT_TESTS_DIR = os.path.abspath("tests/unit_tests/") +VM_TESTS_DIR = os.path.abspath("validmind/tests/") + +OPENAI_MODEL = "gpt-4o" + +CREATE_UNIT_TEST_SYSTEM_PROMPT = """ +You are an expert software engineer with a strong background in data science and machine learning. +Your task is to create unit tests for a given "ValidMind" test. +ValidMind is a Python library for testing and validating machine learning and other models and datasets. +It provides a test harness alongside a huge library of "tests" that can be used to check and validate many different types of models and datasets. +These tests need their own unit tests to ensure they are working as expected. +You will be given the source code of the "ValidMind" test and your job is to create a unit test for it. +Do not include anything other than the code for the unit test in your response. +Only include the code directly, do not include any backticks or other formatting. +This code will be directly written to a Python file, so make sure it is valid Python code. +Where possible, cache the test result in the setUp method so that it is not run for every test (unless the specific test is using different inputs/parameters). +""" + +UPDATE_UNIT_TEST_SYSTEM_PROMPT = """ +You are an expert software engineer with a strong background in data science and machine learning. +Your task is to update an existing unit test for a given "ValidMind" test. +ValidMind is a Python library for testing and validating machine learning and other models and datasets. +It provides a test harness alongside a huge library of "tests" that can be used to check and validate many different types of models and datasets. +These tests need their own unit tests to ensure they are working as expected. +You will be given the source code of the "ValidMind" test and the existing unit test for it. +Your job is to update the existing unit test code to work with any updates to the test. +Do not include anything other than the code for the unit test in your response. +Only include the code directly, do not include any backticks or other formatting. +This code will be directly written to a Python file, so make sure it is valid Python code. +If you don't think the existing unit test has any issues, just return the existing unit test code. +The most likely reason for updating the unit test is that something new has been added to the test's return value (e.g. a new table, figure, raw data, etc.) + +Note: +- for raw data, you should only check that the raw data is an instance of `vm.RawData` (or `RawData` if you do `from validmind import RawData`)... do not check the contents for now +- only change existing checks if you think they are going to fail or are incorrect + +If a unit test doesn't need changes, simply return the exact string "NO CHANGE"! +""" + +# SIMPLE_EXAMPLE_TEST_CODE = """# /Users/me/Code/validmind-library/validmind/tests/model_validation/SimpleAccuracy.py + +# # Copyright © 2023-2024 ValidMind Inc. 
All rights reserved. +# # See the LICENSE file in the root of this repository for details. +# # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial + +# from sklearn.metrics import accuracy_score + +# from validmind.tests import tags, tasks +# from validmind.vm_models import VMDataset, VMModel + + +# @tags("model_validation") +# @tasks("classification", "regression") +# def SimpleAccuracy(model: VMModel, dataset: VMDataset): +# y_pred = dataset.y_pred(model) +# y_true = dataset.y.astype(y_pred.dtype) +# return accuracy_score(y_true, y_pred) +# """ + +# SIMPLE_EXAMPLE_UNIT_TEST_CODE = """# /Users/me/Code/validmind-library/tests/unit_tests/model_validation/sklearn/test_SimpleAccuracy.py + +# import unittest +# import pandas as pd +# from sklearn.linear_model import LogisticRegression +# from sklearn.datasets import make_classification +# from validmind.vm_models import VMDataset, VMModel +# from validmind.tests.model_validation.sklearn.SimpleAccuracy import SimpleAccuracy + + +# class TestSimpleAccuracy(unittest.TestCase): +# def setUp(self): +# # Create a synthetic classification dataset +# X, y = make_classification( +# n_samples=1000, n_features=10, n_classes=2, random_state=0 +# ) + +# # Convert to DataFrame +# self.df = pd.DataFrame(X, columns=[f"feature{i+1}" for i in range(X.shape[1])]) +# self.df['target'] = y + +# # Train a simple Logistic Regression model +# self.model = LogisticRegression() +# self.model.fit(self.df.drop(columns=["target"]), self.df["target"]) + +# # Initialize ValidMind dataset and model +# self.vm_dataset = VMDataset(input_id="classification_dataset", dataset=self.df, target_column="target", __log=False) +# self.vm_model = VMModel(input_id="logistic_regression", model=self.model, __log=False) + +# self.result = SimpleAccuracy([self.vm_dataset], self.vm_model) + +# def test_simple_accuracy(self): +# # Check the types of returned objects +# self.assertIsInstance(self.result, float)Z +# """ + + +client = OpenAI() + + +def create_unit_test(vm_test_path, unit_test_path): + click.echo(f" Creating new unit test since none exists...") + + # grab a unit test from the same directory + unit_test_dir = os.path.dirname(unit_test_path) + unit_test_files = [ + f + for f in os.listdir(unit_test_dir) + if f.startswith("test_") and f.endswith(".py") + ] + + if len(unit_test_files) == 0: + raise ValueError( + f"No unit tests exist for the directory {unit_test_dir}." 
+ " Please create one so we can use it as an example to pass to the LLM" + ) + + eg_unit_test_path = os.path.join(unit_test_dir, unit_test_files[0]) + + with open(eg_unit_test_path, "r") as f: + eg_unit_test_code = f.read() + eg_unit_test_code = f"# {eg_unit_test_path}\n\n{eg_unit_test_code}" + + # get the associated test file for the example unit test + eg_vm_test_path = eg_unit_test_path.replace(UNIT_TESTS_DIR, VM_TESTS_DIR).replace( + "test_", "" + ) + + with open(eg_vm_test_path, "r") as f: + eg_vm_test_code = f.read() + eg_vm_test_code = f"# {eg_vm_test_path}\n\n{eg_vm_test_code}" + + # get the vm test file code + with open(vm_test_path, "r") as f: + vm_test_code = f.read() + vm_test_code = f"# {vm_test_path}\n\n{vm_test_code}" + + response = client.chat.completions.create( + model=OPENAI_MODEL, + messages=[ + {"role": "system", "content": CREATE_UNIT_TEST_SYSTEM_PROMPT}, + {"role": "user", "content": eg_vm_test_code}, + {"role": "assistant", "content": eg_unit_test_code}, + {"role": "user", "content": vm_test_code}, + ], + ) + + unit_test_code = response.choices[0].message.content + unit_test_code = unit_test_code.replace(f"# {unit_test_path}\n\n", "") + with open(unit_test_path, "w") as f: + f.write(unit_test_code) + + +def update_unit_test(vm_test_path, unit_test_path): + click.echo(f" Updating existing unit test...") + + with open(unit_test_path, "r") as f: + unit_test_code = f.read() + + with open(vm_test_path, "r") as f: + vm_test_code = f.read() + + response = client.chat.completions.create( + model=OPENAI_MODEL, + messages=[ + {"role": "system", "content": UPDATE_UNIT_TEST_SYSTEM_PROMPT}, + {"role": "user", "content": f"# {vm_test_path}\n\n{vm_test_code}"}, + {"role": "user", "content": f"# {unit_test_path}\n\n{unit_test_code}"}, + ], + ) + + new_unit_test_code = response.choices[0].message.content + + if "NO CHANGE" in new_unit_test_code: + click.echo("No changes needed") + return + + new_unit_test_code = new_unit_test_code.replace(f"# {unit_test_path}\n\n", "") + with open(unit_test_path, "w") as f: + f.write(new_unit_test_code) + + +def add_or_update_unit_test(vm_test_path, unit_test_path): + click.echo(f"> {unit_test_path}") + + # check if the unit test file exists + if not os.path.exists(unit_test_path): + return create_unit_test(vm_test_path, unit_test_path) + + return update_unit_test(vm_test_path, unit_test_path) + + +def _is_test_file(path): + return path.endswith(".py") and path.split("/")[-1][0].isupper() + + +@click.command() +@click.argument("path", type=click.Path(exists=True, file_okay=True, dir_okay=True)) +@click.option("--update-only", is_flag=True, help="Only update existing unit tests") +def main(path, update_only): + tests_to_process = [] + + # check if path is a file or directory + if os.path.isfile(path): + if _is_test_file(path): + tests_to_process.append(path) + else: + raise ValueError(f"File {path} is not a test file") + + elif os.path.isdir(path): + for root, _, files in os.walk(path): + for file in files: + if _is_test_file(file): + tests_to_process.append(os.path.abspath(os.path.join(root, file))) + + # create a tuple of the test path and the associated unit test path + tests_to_process = [ + ( + test, + test.replace(VM_TESTS_DIR, UNIT_TESTS_DIR).replace( + os.path.basename(test), "test_" + os.path.basename(test) + ), + ) + for test in tests_to_process + ] + + if update_only: + # remove any tests that don't have a unit test + tests_to_process = [ + (vm_test_path, unit_test_path) + for vm_test_path, unit_test_path in tests_to_process + if 
os.path.exists(unit_test_path) + ] + + for vm_test_path, unit_test_path in tests_to_process: + add_or_update_unit_test(vm_test_path, unit_test_path) + + # run black on the tests directory + subprocess.run(["poetry", "run", "black", UNIT_TESTS_DIR]) + + +if __name__ == "__main__": + main() diff --git a/tests/test_integration_tests.py b/tests/test_integration_tests.py index 6da9996c3..2107e4848 100644 --- a/tests/test_integration_tests.py +++ b/tests/test_integration_tests.py @@ -1,302 +1,302 @@ -"""This is a test harness to run unit tests against the ValidMind tests""" - -import os -import time -import unittest -from fnmatch import fnmatch - -import matplotlib.pyplot as plt - -from tabulate import tabulate -from tqdm import tqdm -from validmind.logging import get_logger -from validmind.tests import list_tests, load_test, run_test -from validmind.vm_models.result import TestResult - -from run_test_utils import ( - setup_clustering_test_inputs, - setup_embeddings_test_inputs, - setup_summarization_test_inputs, - setup_tabular_test_inputs, - setup_time_series_test_inputs, -) - - -logger = get_logger(__name__) -# Override plt.show to do nothing -plt.show = lambda: None - -# These tests are expected to fail and need to be fixed -KNOWN_FAILING_TESTS = [ - # Only statsmodels are supported for these metrics - "validmind.model_validation.statsmodels.RegressionCoeffs", - "validmind.model_validation.statsmodels.RegressionFeatureSignificance", - # The number of observations is too small to use the Zivot-Andrews test - "validmind.data_validation.ZivotAndrewsArch", - # These tests can be turned into comparison tests: - "validmind.model_validation.embeddings.CosineSimilarityComparison", - "validmind.model_validation.embeddings.EuclideanDistanceComparison", - # This is a base test class so it doesn't run on its own - "validmind.model_validation.sklearn.ClusterPerformance", - # ValueError: The `cluster_column` parameter must be provided - "validmind.model_validation.embeddings.EmbeddingsVisualization2D", - # These tests have dependencies that are not installed by default - "validmind.data_validation.ProtectedClassesCombination", - "validmind.data_validation.ProtectedClassesDisparity", - "validmind.data_validation.ProtectedClassesThresholdOptimizer", - # The customer churn classification dataset contains a string column 'Germany' which is not supported by the MutualInformation test - "validmind.data_validation.MutualInformation", - # The required column 'score' is not present in the dataset with input_id test_dataset - "validmind.data_validation.ScoreBandDefaultRates", - "validmind.model_validation.sklearn.ScoreProbabilityAlignment", -] -SKIPPED_TESTS = [] -SUCCESSFUL_TESTS = [] - -# Harcode some tests that require specific inputs instead of trying to -# guess from tags or tasks -CUSTOM_TEST_INPUT_ASSIGNMENTS = { - "validmind.data_validation.DatasetDescription": "classification", - "validmind.data_validation.DatasetSplit": "classification", - "validmind.model_validation.ModelMetadata": "classification", -} - -# Some tests require specific configurations. 
This is either expected and required -# or we need to fix these tests so they can run with sane defaults -# -# Here we assign config param keys to each test and then let the test runner know -# how to load the config for that test -TEST_TO_PARAMS_CONFIG = { - # TODO: features_pairs should default to all input dataset pairs - "validmind.model_validation.statsmodels.ScorecardHistogram": "score_column", - # TODO: "ValueError: perplexity must be less than n_samples if using defaults" - "validmind.model_validation.embeddings.TSNEComponentsPairwisePlots": "t_sne_config", - "validmind.model_validation.sklearn.KMeansClustersOptimization": "kmeans_config", - "validmind.model_validation.sklearn.HyperParametersTuning": "hyperparameter_tuning_config", - "validmind.model_validation.embeddings.StabilityAnalysisKeyword": "stability_analysis_keyword_config", -} - -# Global inputs and configurations for the tests -TEST_CONFIG = {} -TEST_INPUTS = {} - - -class TestRunTest(unittest.TestCase): - pass - - -def create_unit_test_func(vm_test_id, test_func): - def unit_test_func(self): - self.assertTrue( - hasattr(test_func, "inputs"), - f"{vm_test_id} missing required inputs", - ) - self.assertTrue( - hasattr(test_func, "__tasks__"), - f"{vm_test_id} missing tasks in metadata", - ) - self.assertTrue( - hasattr(test_func, "__tags__"), - f"{vm_test_id} missing tags in metadata", - ) - - required_inputs = sorted(test_func.inputs) - if required_inputs == ["datasets", "models"]: - logger.debug( - "Skipping test - multi-(dataset,model) tests are not supported at the moment %s", - vm_test_id, - ) - SKIPPED_TESTS.append(vm_test_id) - return - - if "llm" in test_func.__tags__ and "embeddings" not in test_func.__tags__: - logger.debug( - "--- Skipping test - LLM tests not supported yet %s", - vm_test_id, - ) - SKIPPED_TESTS.append(vm_test_id) - return - - logger.debug(">>> Running test %s", vm_test_id) - - # Assume we'll load the classification (tabular) inputs in most cases - custom_test_input_assignment = CUSTOM_TEST_INPUT_ASSIGNMENTS.get(vm_test_id) - selected_test_inputs = None - - if custom_test_input_assignment: - selected_test_inputs = custom_test_input_assignment - elif "clustering" in test_func.__tasks__: - selected_test_inputs = "clustering" - elif "embeddings" in test_func.__tags__: - selected_test_inputs = "embeddings" - elif ( - "text_summarization" in test_func.__tasks__ or "nlp" in test_func.__tasks__ - ): - selected_test_inputs = "text_summarization" - elif "time_series_data" in test_func.__tags__: - selected_test_inputs = "time_series" - else: - selected_test_inputs = "classification" - - inputs = TEST_INPUTS[selected_test_inputs] - - # Build the single test inputs according to the required inputs - single_test_inputs = {} - if required_inputs == ["dataset"]: - single_test_inputs = inputs["single_dataset"] - elif required_inputs == ["dataset", "model"]: - single_test_inputs = inputs["model_and_dataset"] - elif required_inputs == ["datasets"]: - single_test_inputs = inputs["two_datasets"] - elif required_inputs == ["datasets", "model"]: - single_test_inputs = inputs["model_and_two_datasets"] - elif required_inputs == ["models"]: - single_test_inputs = inputs["two_models"] - elif required_inputs == ["dataset", "models"]: - single_test_inputs = inputs["dataset_and_two_models"] - elif required_inputs == ["model"]: - single_test_inputs = inputs["single_model"] - - test_kwargs = { - "test_id": vm_test_id, - "inputs": single_test_inputs, - "show": False, - "generate_description": False, - } - - # Check if the 
test requires a specific configuration - if vm_test_id in TEST_TO_PARAMS_CONFIG: - key = TEST_TO_PARAMS_CONFIG.get(vm_test_id) - if key in TEST_CONFIG: - test_config = TEST_CONFIG.get(key) - # Only set the config if it's not None - if test_config: - test_kwargs["params"] = test_config - else: - logger.error( - "Skipping test %s - missing expected configuration for %s", - vm_test_id, - key, - ) - SKIPPED_TESTS.append(vm_test_id) - return - - print(f"Running test {vm_test_id}...") - start_time = time.time() - result = run_test(**test_kwargs) - end_time = time.time() - execution_time = round(end_time - start_time, 2) - - self.assertTrue( - isinstance(result, TestResult), - f"Expected TestResult, got {type(result)}", - ) - self.assertEqual( - result.result_id, - vm_test_id, - f"Expected result_id to be {vm_test_id}, got {result.result_id}", - ) - - # Finally, the test worked so we can add it to the list of successful tests - # and note the time it took to run - SUCCESSFUL_TESTS.append( - { - "test_id": vm_test_id, - "test_input_types": selected_test_inputs, - "execution_time": execution_time, - } - ) - - return unit_test_func - - -def create_test_summary_func(): - """ - Create a function that prints a summary of the test results. - We do this dynamically so it runs after all the tests have run. - """ - - def test_summary(self): - self.assertTrue( - True, - "Test results not found. Did any tests run?", - ) - logger.info(">>> Test Summary") - logger.info( - ">>> NOTE: Please review failing test cases directly in the output below." - ) - - test_summary = [] - for test in SUCCESSFUL_TESTS: - test_summary.append( - [ - test["test_id"], - test["test_input_types"], - "SUCCESS", - test["execution_time"], - ] - ) - - for test in KNOWN_FAILING_TESTS: - test_summary.append([test, None, "KNOWN FAILURE", None]) - - for test in SKIPPED_TESTS: - test_summary.append([test, None, "SKIPPED", None]) - - print( - tabulate( - test_summary, - headers=["Test ID", "Type of Test Inputs", "Status", "Execution Time"], - tablefmt="pretty", - ) - ) - - return test_summary - - -def create_unit_test_funcs_from_vm_tests(): - setup_tabular_test_inputs(TEST_INPUTS, TEST_CONFIG) - setup_summarization_test_inputs(TEST_INPUTS, TEST_CONFIG) - setup_time_series_test_inputs(TEST_INPUTS, TEST_CONFIG) - setup_embeddings_test_inputs(TEST_INPUTS, TEST_CONFIG) - setup_clustering_test_inputs(TEST_INPUTS, TEST_CONFIG) - - custom_test_ids = os.environ.get("TEST_IDS") - custom_test_ids = custom_test_ids.split(",") if custom_test_ids else None - tests_to_run = list_tests(pretty=False) if not custom_test_ids else custom_test_ids - - # allow filtering tests by wildcard using fnmatch - # e.g. 
only run tests that start with "validmind.data_validation" - # TEST_PATTERN="validmind.data_validation*" - test_pattern = os.environ.get("TEST_PATTERN") - if test_pattern: - tests_to_run = [ - test_id for test_id in tests_to_run if fnmatch(test_id, test_pattern) - ] - - for vm_test_id in tqdm(sorted(tests_to_run)): - # Only skip known failing tests if we're not running a custom set of tests - if custom_test_ids is None and vm_test_id in KNOWN_FAILING_TESTS: - logger.debug("Skipping known failing test %s", vm_test_id) - continue - - # load the test class - test_func = load_test(vm_test_id) - - # create a unit test function for the test class - unit_test_func = create_unit_test_func(vm_test_id, test_func) - unit_test_func_name = f'test_{vm_test_id.replace(".", "_")}' - - # add the unit test function to the unit test class - setattr(TestRunTest, f"test_{unit_test_func_name}", unit_test_func) - - # create a test summary function. the zzz is to ensure it runs last - test_summary_func = create_test_summary_func() - setattr(TestRunTest, "test_zzz_summary", test_summary_func) - - -create_unit_test_funcs_from_vm_tests() - - -if __name__ == "__main__": - unittest.main() +# """This is a test harness to run unit tests against the ValidMind tests""" + +# import os +# import time +# import unittest +# from fnmatch import fnmatch + +# import matplotlib.pyplot as plt + +# from tabulate import tabulate +# from tqdm import tqdm +# from validmind.logging import get_logger +# from validmind.tests import list_tests, load_test, run_test +# from validmind.vm_models.result import TestResult + +# from run_test_utils import ( +# setup_clustering_test_inputs, +# setup_embeddings_test_inputs, +# setup_summarization_test_inputs, +# setup_tabular_test_inputs, +# setup_time_series_test_inputs, +# ) + + +# logger = get_logger(__name__) +# # Override plt.show to do nothing +# plt.show = lambda: None + +# # These tests are expected to fail and need to be fixed +# KNOWN_FAILING_TESTS = [ +# # Only statsmodels are supported for these metrics +# "validmind.model_validation.statsmodels.RegressionCoeffs", +# "validmind.model_validation.statsmodels.RegressionFeatureSignificance", +# # The number of observations is too small to use the Zivot-Andrews test +# "validmind.data_validation.ZivotAndrewsArch", +# # These tests can be turned into comparison tests: +# "validmind.model_validation.embeddings.CosineSimilarityComparison", +# "validmind.model_validation.embeddings.EuclideanDistanceComparison", +# # This is a base test class so it doesn't run on its own +# "validmind.model_validation.sklearn.ClusterPerformance", +# # ValueError: The `cluster_column` parameter must be provided +# "validmind.model_validation.embeddings.EmbeddingsVisualization2D", +# # These tests have dependencies that are not installed by default +# "validmind.data_validation.ProtectedClassesCombination", +# "validmind.data_validation.ProtectedClassesDisparity", +# "validmind.data_validation.ProtectedClassesThresholdOptimizer", +# # The customer churn classification dataset contains a string column 'Germany' which is not supported by the MutualInformation test +# "validmind.data_validation.MutualInformation", +# # The required column 'score' is not present in the dataset with input_id test_dataset +# "validmind.data_validation.ScoreBandDefaultRates", +# "validmind.model_validation.sklearn.ScoreProbabilityAlignment", +# ] +# SKIPPED_TESTS = [] +# SUCCESSFUL_TESTS = [] + +# # Harcode some tests that require specific inputs instead of trying to +# # guess from tags 
or tasks +# CUSTOM_TEST_INPUT_ASSIGNMENTS = { +# "validmind.data_validation.DatasetDescription": "classification", +# "validmind.data_validation.DatasetSplit": "classification", +# "validmind.model_validation.ModelMetadata": "classification", +# } + +# # Some tests require specific configurations. This is either expected and required +# # or we need to fix these tests so they can run with sane defaults +# # +# # Here we assign config param keys to each test and then let the test runner know +# # how to load the config for that test +# TEST_TO_PARAMS_CONFIG = { +# # TODO: features_pairs should default to all input dataset pairs +# "validmind.model_validation.statsmodels.ScorecardHistogram": "score_column", +# # TODO: "ValueError: perplexity must be less than n_samples if using defaults" +# "validmind.model_validation.embeddings.TSNEComponentsPairwisePlots": "t_sne_config", +# "validmind.model_validation.sklearn.KMeansClustersOptimization": "kmeans_config", +# "validmind.model_validation.sklearn.HyperParametersTuning": "hyperparameter_tuning_config", +# "validmind.model_validation.embeddings.StabilityAnalysisKeyword": "stability_analysis_keyword_config", +# } + +# # Global inputs and configurations for the tests +# TEST_CONFIG = {} +# TEST_INPUTS = {} + + +# class TestRunTest(unittest.TestCase): +# pass + + +# def create_unit_test_func(vm_test_id, test_func): +# def unit_test_func(self): +# self.assertTrue( +# hasattr(test_func, "inputs"), +# f"{vm_test_id} missing required inputs", +# ) +# self.assertTrue( +# hasattr(test_func, "__tasks__"), +# f"{vm_test_id} missing tasks in metadata", +# ) +# self.assertTrue( +# hasattr(test_func, "__tags__"), +# f"{vm_test_id} missing tags in metadata", +# ) + +# required_inputs = sorted(test_func.inputs) +# if required_inputs == ["datasets", "models"]: +# logger.debug( +# "Skipping test - multi-(dataset,model) tests are not supported at the moment %s", +# vm_test_id, +# ) +# SKIPPED_TESTS.append(vm_test_id) +# return + +# if "llm" in test_func.__tags__ and "embeddings" not in test_func.__tags__: +# logger.debug( +# "--- Skipping test - LLM tests not supported yet %s", +# vm_test_id, +# ) +# SKIPPED_TESTS.append(vm_test_id) +# return + +# logger.debug(">>> Running test %s", vm_test_id) + +# # Assume we'll load the classification (tabular) inputs in most cases +# custom_test_input_assignment = CUSTOM_TEST_INPUT_ASSIGNMENTS.get(vm_test_id) +# selected_test_inputs = None + +# if custom_test_input_assignment: +# selected_test_inputs = custom_test_input_assignment +# elif "clustering" in test_func.__tasks__: +# selected_test_inputs = "clustering" +# elif "embeddings" in test_func.__tags__: +# selected_test_inputs = "embeddings" +# elif ( +# "text_summarization" in test_func.__tasks__ or "nlp" in test_func.__tasks__ +# ): +# selected_test_inputs = "text_summarization" +# elif "time_series_data" in test_func.__tags__: +# selected_test_inputs = "time_series" +# else: +# selected_test_inputs = "classification" + +# inputs = TEST_INPUTS[selected_test_inputs] + +# # Build the single test inputs according to the required inputs +# single_test_inputs = {} +# if required_inputs == ["dataset"]: +# single_test_inputs = inputs["single_dataset"] +# elif required_inputs == ["dataset", "model"]: +# single_test_inputs = inputs["model_and_dataset"] +# elif required_inputs == ["datasets"]: +# single_test_inputs = inputs["two_datasets"] +# elif required_inputs == ["datasets", "model"]: +# single_test_inputs = inputs["model_and_two_datasets"] +# elif required_inputs == 
["models"]: +# single_test_inputs = inputs["two_models"] +# elif required_inputs == ["dataset", "models"]: +# single_test_inputs = inputs["dataset_and_two_models"] +# elif required_inputs == ["model"]: +# single_test_inputs = inputs["single_model"] + +# test_kwargs = { +# "test_id": vm_test_id, +# "inputs": single_test_inputs, +# "show": False, +# "generate_description": False, +# } + +# # Check if the test requires a specific configuration +# if vm_test_id in TEST_TO_PARAMS_CONFIG: +# key = TEST_TO_PARAMS_CONFIG.get(vm_test_id) +# if key in TEST_CONFIG: +# test_config = TEST_CONFIG.get(key) +# # Only set the config if it's not None +# if test_config: +# test_kwargs["params"] = test_config +# else: +# logger.error( +# "Skipping test %s - missing expected configuration for %s", +# vm_test_id, +# key, +# ) +# SKIPPED_TESTS.append(vm_test_id) +# return + +# print(f"Running test {vm_test_id}...") +# start_time = time.time() +# result = run_test(**test_kwargs) +# end_time = time.time() +# execution_time = round(end_time - start_time, 2) + +# self.assertTrue( +# isinstance(result, TestResult), +# f"Expected TestResult, got {type(result)}", +# ) +# self.assertEqual( +# result.result_id, +# vm_test_id, +# f"Expected result_id to be {vm_test_id}, got {result.result_id}", +# ) + +# # Finally, the test worked so we can add it to the list of successful tests +# # and note the time it took to run +# SUCCESSFUL_TESTS.append( +# { +# "test_id": vm_test_id, +# "test_input_types": selected_test_inputs, +# "execution_time": execution_time, +# } +# ) + +# return unit_test_func + + +# def create_test_summary_func(): +# """ +# Create a function that prints a summary of the test results. +# We do this dynamically so it runs after all the tests have run. +# """ + +# def test_summary(self): +# self.assertTrue( +# True, +# "Test results not found. Did any tests run?", +# ) +# logger.info(">>> Test Summary") +# logger.info( +# ">>> NOTE: Please review failing test cases directly in the output below." +# ) + +# test_summary = [] +# for test in SUCCESSFUL_TESTS: +# test_summary.append( +# [ +# test["test_id"], +# test["test_input_types"], +# "SUCCESS", +# test["execution_time"], +# ] +# ) + +# for test in KNOWN_FAILING_TESTS: +# test_summary.append([test, None, "KNOWN FAILURE", None]) + +# for test in SKIPPED_TESTS: +# test_summary.append([test, None, "SKIPPED", None]) + +# print( +# tabulate( +# test_summary, +# headers=["Test ID", "Type of Test Inputs", "Status", "Execution Time"], +# tablefmt="pretty", +# ) +# ) + +# return test_summary + + +# def create_unit_test_funcs_from_vm_tests(): +# setup_tabular_test_inputs(TEST_INPUTS, TEST_CONFIG) +# setup_summarization_test_inputs(TEST_INPUTS, TEST_CONFIG) +# setup_time_series_test_inputs(TEST_INPUTS, TEST_CONFIG) +# setup_embeddings_test_inputs(TEST_INPUTS, TEST_CONFIG) +# setup_clustering_test_inputs(TEST_INPUTS, TEST_CONFIG) + +# custom_test_ids = os.environ.get("TEST_IDS") +# custom_test_ids = custom_test_ids.split(",") if custom_test_ids else None +# tests_to_run = list_tests(pretty=False) if not custom_test_ids else custom_test_ids + +# # allow filtering tests by wildcard using fnmatch +# # e.g. 
only run tests that start with "validmind.data_validation" +# # TEST_PATTERN="validmind.data_validation*" +# test_pattern = os.environ.get("TEST_PATTERN") +# if test_pattern: +# tests_to_run = [ +# test_id for test_id in tests_to_run if fnmatch(test_id, test_pattern) +# ] + +# for vm_test_id in tqdm(sorted(tests_to_run)): +# # Only skip known failing tests if we're not running a custom set of tests +# if custom_test_ids is None and vm_test_id in KNOWN_FAILING_TESTS: +# logger.debug("Skipping known failing test %s", vm_test_id) +# continue + +# # load the test class +# test_func = load_test(vm_test_id) + +# # create a unit test function for the test class +# unit_test_func = create_unit_test_func(vm_test_id, test_func) +# unit_test_func_name = f'test_{vm_test_id.replace(".", "_")}' + +# # add the unit test function to the unit test class +# setattr(TestRunTest, f"test_{unit_test_func_name}", unit_test_func) + +# # create a test summary function. the zzz is to ensure it runs last +# test_summary_func = create_test_summary_func() +# setattr(TestRunTest, "test_zzz_summary", test_summary_func) + + +# create_unit_test_funcs_from_vm_tests() + + +# if __name__ == "__main__": +# unittest.main() diff --git a/tests/unit_tests/data_validation/nlp/test_CommonWords.py b/tests/unit_tests/data_validation/nlp/test_CommonWords.py index 1fd78a859..2af17ca05 100644 --- a/tests/unit_tests/data_validation/nlp/test_CommonWords.py +++ b/tests/unit_tests/data_validation/nlp/test_CommonWords.py @@ -2,6 +2,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData from validmind.tests.data_validation.nlp.CommonWords import CommonWords @@ -37,9 +38,9 @@ def setUp(self): __log=False, ) - def test_returns_plotly_figure(self): + def test_returns_plotly_figure_and_raw_data(self): # Run the function - result = CommonWords(self.vm_dataset) + result, raw_data = CommonWords(self.vm_dataset) # Check if result is a Plotly Figure self.assertIsInstance(result, go.Figure) @@ -53,8 +54,11 @@ def test_returns_plotly_figure(self): self.assertIsNotNone(result.layout.xaxis.title) self.assertIsNotNone(result.layout.yaxis.title) + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_common_words_content(self): - result = CommonWords(self.vm_dataset) + result, _ = CommonWords(self.vm_dataset) # Get the words from the bar chart words = result.data[0].x @@ -75,7 +79,7 @@ def test_common_words_content(self): self.assertNotIn("over", words) def test_word_frequencies(self): - result = CommonWords(self.vm_dataset) + result, _ = CommonWords(self.vm_dataset) # Get the words and their frequencies words = list(result.data[0].x) diff --git a/tests/unit_tests/data_validation/nlp/test_LanguageDetection.py b/tests/unit_tests/data_validation/nlp/test_LanguageDetection.py index b9fe195b7..f767a52ef 100644 --- a/tests/unit_tests/data_validation/nlp/test_LanguageDetection.py +++ b/tests/unit_tests/data_validation/nlp/test_LanguageDetection.py @@ -37,26 +37,31 @@ def setUp(self): __log=False, ) - def test_returns_plotly_figure(self): + def test_returns_plotly_figure_and_raw_data(self): # Run the function result = LanguageDetection(self.vm_dataset) + fig, raw_data = result - # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + # Check if the first result is a Plotly Figure + self.assertIsInstance(fig, go.Figure) # Should have one trace (histogram) - self.assertEqual(len(result.data), 1) - self.assertEqual(result.data[0].type, 
"histogram") + self.assertEqual(len(fig.data), 1) + self.assertEqual(fig.data[0].type, "histogram") # Should have a title and axis labels - self.assertIsNotNone(result.layout.title) - self.assertIsNotNone(result.layout.xaxis.title) + self.assertIsNotNone(fig.layout.title) + self.assertIsNotNone(fig.layout.xaxis.title) + + # Check if the second result is an instance of RawData + self.assertIsInstance(raw_data, vm.RawData) def test_language_detection(self): result = LanguageDetection(self.vm_dataset) + fig, _ = result # Get the detected languages from the histogram - languages = result.data[0].x + languages = fig.data[0].x # Check that expected languages are present self.assertTrue("en" in languages[0]) # English diff --git a/tests/unit_tests/data_validation/nlp/test_PolarityAndSubjectivity.py b/tests/unit_tests/data_validation/nlp/test_PolarityAndSubjectivity.py index cfabe8a18..da71773e9 100644 --- a/tests/unit_tests/data_validation/nlp/test_PolarityAndSubjectivity.py +++ b/tests/unit_tests/data_validation/nlp/test_PolarityAndSubjectivity.py @@ -39,13 +39,13 @@ def setUp(self): __log=False, ) - def test_returns_plotly_figure_and_tables(self): + def test_returns_plotly_figure_and_tables_and_raw_data(self): # Run the function result = PolarityAndSubjectivity(self.vm_dataset) - # Check if result is a tuple of (Figure, dict) + # Check if result is a tuple of (Figure, dict, RawData) self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 2) + self.assertEqual(len(result), 3) # Check the figure fig = result[0] @@ -66,6 +66,10 @@ def test_returns_plotly_figure_and_tables(self): self.assertIn("Quadrant Distribution", tables) self.assertIn("Statistics", tables) + # Check the raw data + raw_data = result[2] + self.assertIsInstance(raw_data, vm.RawData) + def test_polarity_and_subjectivity_values(self): result = PolarityAndSubjectivity(self.vm_dataset) stats_df = result[1]["Statistics"] diff --git a/tests/unit_tests/data_validation/nlp/test_Punctuations.py b/tests/unit_tests/data_validation/nlp/test_Punctuations.py index c0f702bdb..790673251 100644 --- a/tests/unit_tests/data_validation/nlp/test_Punctuations.py +++ b/tests/unit_tests/data_validation/nlp/test_Punctuations.py @@ -2,7 +2,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm -from validmind.tests.data_validation.nlp.Punctuations import Punctuations +from validmind.tests.data_validation.nlp.Punctuations import Punctuations, RawData class TestPunctuations(unittest.TestCase): @@ -39,26 +39,29 @@ def setUp(self): def test_returns_plotly_figure(self): # Run the function with default token mode - result = Punctuations(self.vm_dataset) + fig, raw_data = Punctuations(self.vm_dataset) # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + self.assertIsInstance(fig, go.Figure) # Should have one trace (bar chart) - self.assertEqual(len(result.data), 1) - self.assertEqual(result.data[0].type, "bar") + self.assertEqual(len(fig.data), 1) + self.assertEqual(fig.data[0].type, "bar") # Should have a title and axis labels - self.assertIsNotNone(result.layout.title) - self.assertIsNotNone(result.layout.xaxis.title) - self.assertIsNotNone(result.layout.yaxis.title) + self.assertIsNotNone(fig.layout.title) + self.assertIsNotNone(fig.layout.xaxis.title) + self.assertIsNotNone(fig.layout.yaxis.title) + + # Check that raw_data is instance of RawData + self.assertIsInstance(raw_data, RawData) def test_token_mode_counting(self): - result = Punctuations(self.vm_dataset, count_mode="token") + fig, 
raw_data = Punctuations(self.vm_dataset, count_mode="token") # Get the punctuation marks and their counts - punctuation_marks = result.data[0].x - counts = result.data[0].y + punctuation_marks = fig.data[0].x + counts = fig.data[0].y # Convert to dict for easier testing punct_counts = dict(zip(punctuation_marks, counts)) @@ -69,12 +72,15 @@ def test_token_mode_counting(self): self.assertEqual(punct_counts["!"], 0) # Zero exclamation marks self.assertEqual(punct_counts["?"], 0) # Zero question marks + # Check that raw_data is instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_word_mode_counting(self): - result = Punctuations(self.vm_dataset, count_mode="word") + fig, raw_data = Punctuations(self.vm_dataset, count_mode="word") # Get the punctuation marks and their counts - punctuation_marks = result.data[0].x - counts = result.data[0].y + punctuation_marks = fig.data[0].x + counts = fig.data[0].y # Convert to dict for easier testing punct_counts = dict(zip(punctuation_marks, counts)) @@ -83,6 +89,9 @@ def test_word_mode_counting(self): self.assertTrue(punct_counts["-"] > 0) # Should count hyphen in "Semi-colons" self.assertEqual(punct_counts['"'], 2) # Two quote marks + # Check that raw_data is instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_invalid_count_mode(self): # Check if ValueError is raised for invalid count_mode with self.assertRaises(ValueError): diff --git a/tests/unit_tests/data_validation/nlp/test_Sentiment.py b/tests/unit_tests/data_validation/nlp/test_Sentiment.py index 4c2f84cb7..48707647b 100644 --- a/tests/unit_tests/data_validation/nlp/test_Sentiment.py +++ b/tests/unit_tests/data_validation/nlp/test_Sentiment.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt import validmind as vm from validmind.tests.data_validation.nlp.Sentiment import Sentiment +from validmind import RawData class TestSentiment(unittest.TestCase): @@ -39,21 +40,27 @@ def setUp(self): __log=False, ) - def test_returns_matplotlib_figure(self): + def test_returns_matplotlib_figure_and_raw_data(self): # Run the function result = Sentiment(self.vm_dataset) - # Check if result is a matplotlib Figure - self.assertIsInstance(result, plt.Figure) + # Separate results + fig, raw_data = result + + # Check if result includes a matplotlib Figure + self.assertIsInstance(fig, plt.Figure) # Check if figure has an axes - self.assertTrue(len(result.axes) > 0) + self.assertTrue(len(fig.axes) > 0) # Check if axes has a title and labels - ax = result.axes[0] + ax = fig.axes[0] self.assertIsNotNone(ax.get_title()) self.assertIsNotNone(ax.get_xlabel()) + # Check if result includes RawData + self.assertIsInstance(raw_data, RawData) + def test_sentiment_range(self): from nltk.sentiment import SentimentIntensityAnalyzer diff --git a/tests/unit_tests/data_validation/nlp/test_Toxicity.py b/tests/unit_tests/data_validation/nlp/test_Toxicity.py index 5bc84ae78..b17a7f598 100644 --- a/tests/unit_tests/data_validation/nlp/test_Toxicity.py +++ b/tests/unit_tests/data_validation/nlp/test_Toxicity.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt import validmind as vm from validmind.tests.data_validation.nlp.Toxicity import Toxicity +from validmind import RawData class TestToxicity(unittest.TestCase): @@ -39,21 +40,28 @@ def setUp(self): __log=False, ) - def test_returns_matplotlib_figure(self): + def test_returns_matplotlib_figure_and_raw_data(self): # Run the function result = Toxicity(self.vm_dataset) - # Check if result is a matplotlib Figure - self.assertIsInstance(result, 
plt.Figure) + # Check if result is a tuple with two elements + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + + # Check if the first element is a matplotlib Figure + self.assertIsInstance(result[0], plt.Figure) # Check if figure has an axes - self.assertTrue(len(result.axes) > 0) + self.assertTrue(len(result[0].axes) > 0) # Check if axes has a title and labels - ax = result.axes[0] + ax = result[0].axes[0] self.assertIsNotNone(ax.get_title()) self.assertIsNotNone(ax.get_xlabel()) + # Check if the second element is an instance of RawData + self.assertIsInstance(result[1], RawData) + def test_toxicity_range(self): import evaluate diff --git a/tests/unit_tests/data_validation/test_ACFandPACFPlot.py b/tests/unit_tests/data_validation/test_ACFandPACFPlot.py index 987280977..a4fa66606 100644 --- a/tests/unit_tests/data_validation/test_ACFandPACFPlot.py +++ b/tests/unit_tests/data_validation/test_ACFandPACFPlot.py @@ -3,6 +3,7 @@ import validmind as vm from validmind.tests.data_validation.ACFandPACFPlot import ACFandPACFPlot from plotly.graph_objects import Figure +from validmind import RawData class TestACFandPACFPlot(unittest.TestCase): @@ -27,16 +28,16 @@ def test_returns_expected_figures(self): # Run the function result = ACFandPACFPlot(self.vm_dataset) - # Check if result is a tuple - self.assertIsInstance(result, tuple) + # Should return 6 items (ACF and PACF for each column + raw data) + self.assertEqual(len(result), 5) - # Should return 4 figures (ACF and PACF for each column) - self.assertEqual(len(result), 4) - - # Check if all elements are Plotly figures - for figure in result: + # Check if the first 4 elements are Plotly figures + for figure in result[:-1]: self.assertIsInstance(figure, Figure) + # Check last element is of type RawData + self.assertIsInstance(result[-1], RawData) + def test_raises_error_for_non_datetime_index(self): # Create dataset with non-datetime index df_wrong_index = self.df.reset_index() @@ -66,5 +67,8 @@ def test_handles_nan_values(self): # Should run without errors result = ACFandPACFPlot(vm_dataset_with_nans) - # Should still return 4 figures - self.assertEqual(len(result), 4) + # Should still return 6 items (ACF and PACF per column + raw data) + self.assertEqual(len(result), 5) + + # Check if last element is RawData + self.assertIsInstance(result[-1], RawData) diff --git a/tests/unit_tests/data_validation/test_AutoMA.py b/tests/unit_tests/data_validation/test_AutoMA.py index 16de4bb8b..ba42a9605 100644 --- a/tests/unit_tests/data_validation/test_AutoMA.py +++ b/tests/unit_tests/data_validation/test_AutoMA.py @@ -3,6 +3,7 @@ import numpy as np import validmind as vm from validmind.tests.data_validation.AutoMA import AutoMA +from validmind import RawData class TestAutoMA(unittest.TestCase): @@ -41,7 +42,7 @@ def setUp(self): def test_returns_expected_structure(self): # Run the function - result = AutoMA(self.vm_dataset, max_ma_order=3) + result, raw_data = AutoMA(self.vm_dataset, max_ma_order=3) # Check if result is a dictionary with expected keys self.assertIsInstance(result, dict) @@ -61,8 +62,11 @@ def test_returns_expected_structure(self): list(result["Best MA Order Results"].columns), expected_columns ) + # Check raw data is instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_ma_order_detection(self): - result = AutoMA(self.vm_dataset, max_ma_order=3) + result, _ = AutoMA(self.vm_dataset, max_ma_order=3) best_orders = result["Best MA Order Results"] # Get best MA orders for each process @@ -80,7 +84,7 @@ 
def test_ma_order_detection(self): def test_max_ma_order_parameter(self): max_order = 2 - result = AutoMA(self.vm_dataset, max_ma_order=max_order) + result, _ = AutoMA(self.vm_dataset, max_ma_order=max_order) analysis_results = result["Auto MA Analysis Results"] # Check that no MA order exceeds the maximum @@ -88,4 +92,4 @@ def test_max_ma_order_parameter(self): def test_handles_nan_values(self): # Should run without errors despite NaN values - result = AutoMA(self.vm_dataset, max_ma_order=3) + result, _ = AutoMA(self.vm_dataset, max_ma_order=3) diff --git a/tests/unit_tests/data_validation/test_BivariateScatterPlots.py b/tests/unit_tests/data_validation/test_BivariateScatterPlots.py index fb685f609..409869a91 100644 --- a/tests/unit_tests/data_validation/test_BivariateScatterPlots.py +++ b/tests/unit_tests/data_validation/test_BivariateScatterPlots.py @@ -2,6 +2,7 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.BivariateScatterPlots import BivariateScatterPlots +from validmind import RawData class TestBivariateScatterPlots(unittest.TestCase): @@ -26,5 +27,8 @@ def test_returns_tuple_of_figures(self): # Check if result is a tuple self.assertIsInstance(result, tuple) - # Check if the tuple contains at least one figure (since we have multiple numeric columns) + # Check if the tuple contains at least one figure and raw data (since we have multiple numeric columns) self.assertTrue(len(result) > 0) + + # Check the raw data is an instance of RawData + self.assertTrue(isinstance(result[-1], RawData)) diff --git a/tests/unit_tests/data_validation/test_BoxPierce.py b/tests/unit_tests/data_validation/test_BoxPierce.py index 0bef33f81..ddc1ac18f 100644 --- a/tests/unit_tests/data_validation/test_BoxPierce.py +++ b/tests/unit_tests/data_validation/test_BoxPierce.py @@ -2,6 +2,7 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.BoxPierce import BoxPierce +from validmind import RawData class TestBoxPierce(unittest.TestCase): @@ -20,7 +21,7 @@ def setUp(self): def test_returns_dataframe_with_expected_columns(self): # Run the function - result = BoxPierce(self.vm_dataset) + result, raw_data = BoxPierce(self.vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -31,3 +32,10 @@ def test_returns_dataframe_with_expected_columns(self): # Check if the DataFrame has the expected number of rows (one for each input column) self.assertEqual(len(result), len(self.df.columns)) + + def test_returns_raw_data_as_rawdata_instance(self): + # Run the function + _, raw_data = BoxPierce(self.vm_dataset) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_ClassImbalance.py b/tests/unit_tests/data_validation/test_ClassImbalance.py index e4bf0dc7b..4d3de0c26 100644 --- a/tests/unit_tests/data_validation/test_ClassImbalance.py +++ b/tests/unit_tests/data_validation/test_ClassImbalance.py @@ -1,6 +1,7 @@ import unittest import pandas as pd import validmind as vm +from validmind import RawData from validmind.errors import SkipTestError from validmind.tests.data_validation.ClassImbalance import ClassImbalance from plotly.graph_objs import Figure @@ -35,7 +36,7 @@ def setUp(self): ) def test_balanced_classes(self): - results, figure, passed = ClassImbalance( + results, figure, passed, raw_data = ClassImbalance( self.balanced_dataset, min_percent_threshold=20 ) @@ -43,6 +44,7 @@ def test_balanced_classes(self): 
self.assertIsInstance(results, dict) self.assertIsInstance(figure, Figure) self.assertIsInstance(passed, bool) + self.assertIsInstance(raw_data, RawData) # Check results for balanced dataset imbalance_data = results["target Class Imbalance"] @@ -53,10 +55,13 @@ def test_balanced_classes(self): self.assertTrue(passed) # Overall test should pass def test_imbalanced_classes(self): - results, figure, passed = ClassImbalance( + results, figure, passed, raw_data = ClassImbalance( self.imbalanced_dataset, min_percent_threshold=20 ) + # Check return type for raw data + self.assertIsInstance(raw_data, RawData) + imbalance_data = results["target Class Imbalance"] # Class B should fail (17% < 20%) @@ -66,7 +71,7 @@ def test_imbalanced_classes(self): def test_custom_threshold(self): # With threshold of 10%, both classes should pass even in imbalanced dataset - results, figure, passed = ClassImbalance( + results, figure, passed, raw_data = ClassImbalance( self.imbalanced_dataset, min_percent_threshold=10 ) self.assertTrue(passed) diff --git a/tests/unit_tests/data_validation/test_DatasetDescription.py b/tests/unit_tests/data_validation/test_DatasetDescription.py index e27cab7b0..16b9f2b9a 100644 --- a/tests/unit_tests/data_validation/test_DatasetDescription.py +++ b/tests/unit_tests/data_validation/test_DatasetDescription.py @@ -3,6 +3,7 @@ import numpy as np import validmind as vm from validmind.tests.data_validation.DatasetDescription import DatasetDescription +from validmind import RawData class TestDatasetDescription(unittest.TestCase): @@ -61,14 +62,20 @@ def setUp(self): def test_returns_expected_structure(self): result = DatasetDescription(self.vm_dataset) - # Check if result is a dictionary with expected key - self.assertIsInstance(result, dict) - self.assertIn("Dataset Description", result) + # Check if result is a tuple with expected structure (Dataset Description, RawData) + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + + description, raw_data = result + + # Check if description is a dictionary with expected key + self.assertIsInstance(description, dict) + self.assertIn("Dataset Description", description) # Check if description is a list of dictionaries - description = result["Dataset Description"] - self.assertIsInstance(description, list) - self.assertTrue(all(isinstance(item, dict) for item in description)) + description_list = description["Dataset Description"] + self.assertIsInstance(description_list, list) + self.assertTrue(all(isinstance(item, dict) for item in description_list)) # Check if each column description has required fields # Note: Count is not included as it's not available for Null type columns @@ -81,21 +88,26 @@ def test_returns_expected_structure(self): "Distinct", "Distinct %", ] - for item in description: + for item in description_list: for field in required_fields: self.assertIn(field, item) self.assertIsNotNone(item[field]) # Check Count field separately as it's not available for Null columns - for item in description: + for item in description_list: if item["Type"] != "Null": self.assertIn("Count", item) self.assertIsNotNone(item["Count"]) + # Check raw_data is instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_column_types_and_stats(self): result = DatasetDescription(self.vm_dataset) - description = result["Dataset Description"] - column_info = {item["Name"]: item for item in description} + description, _ = result + column_info = { + item["Name"]: item for item in description["Dataset Description"] + } # Check 
numeric column self.assertEqual(column_info["numeric"]["Type"], "Numeric") diff --git a/tests/unit_tests/data_validation/test_DatasetSplit.py b/tests/unit_tests/data_validation/test_DatasetSplit.py index eaa63463c..63d3257d9 100644 --- a/tests/unit_tests/data_validation/test_DatasetSplit.py +++ b/tests/unit_tests/data_validation/test_DatasetSplit.py @@ -1,6 +1,7 @@ import unittest import pandas as pd import validmind as vm +from validmind import RawData from validmind.tests.data_validation.DatasetSplit import DatasetSplit @@ -32,14 +33,16 @@ def setUp(self): def test_dataset_split_proportions(self): # Run DatasetSplit - result = DatasetSplit([self.train_dataset, self.test_dataset, self.val_dataset]) + table, raw_data = DatasetSplit( + [self.train_dataset, self.test_dataset, self.val_dataset] + ) # Verify the structure of the result - self.assertIsInstance(result, list) - self.assertEqual(len(result), 4) # 3 datasets + total + self.assertIsInstance(table, list) + self.assertEqual(len(table), 4) # 3 datasets + total # Create a dictionary for easier testing - result_dict = {item["Dataset"]: item for item in result} + result_dict = {item["Dataset"]: item for item in table} # Test total size self.assertEqual(result_dict["Total"]["Size"], 100) @@ -55,12 +58,15 @@ def test_dataset_split_proportions(self): self.assertEqual(result_dict["validation_ds"]["Size"], 20) self.assertEqual(result_dict["validation_ds"]["Proportion"], "20.00%") + # Verify raw data + self.assertIsInstance(raw_data, RawData) + def test_dataset_split_with_none(self): # Test with some datasets being None - result = DatasetSplit([self.train_dataset, None, self.test_dataset]) + table, raw_data = DatasetSplit([self.train_dataset, None, self.test_dataset]) # Create a dictionary for easier testing - result_dict = {item["Dataset"]: item for item in result} + result_dict = {item["Dataset"]: item for item in table} # Test total size self.assertEqual(result_dict["Total"]["Size"], 80) @@ -72,3 +78,6 @@ def test_dataset_split_with_none(self): self.assertEqual(result_dict["test_ds"]["Size"], 20) self.assertEqual(result_dict["test_ds"]["Proportion"], "25.00%") + + # Verify raw data + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_DickeyFullerGLS.py b/tests/unit_tests/data_validation/test_DickeyFullerGLS.py index 5220fabf8..1ce578ed7 100644 --- a/tests/unit_tests/data_validation/test_DickeyFullerGLS.py +++ b/tests/unit_tests/data_validation/test_DickeyFullerGLS.py @@ -4,6 +4,7 @@ import validmind as vm from validmind.tests.data_validation.DickeyFullerGLS import DickeyFullerGLS from validmind.errors import SkipTestError +from validmind import RawData class TestDickeyFullerGLS(unittest.TestCase): @@ -37,12 +38,15 @@ def setUp(self): ) def test_dfgls_structure(self): - result = DickeyFullerGLS(self.vm_dataset) + result, raw_data = DickeyFullerGLS(self.vm_dataset) # Check basic structure self.assertIn("DFGLS Test Results", result) self.assertIsInstance(result["DFGLS Test Results"], list) + # Check raw data + self.assertIsInstance(raw_data, RawData) + # Check results for each variable for var_result in result["DFGLS Test Results"]: self.assertIn("Variable", var_result) @@ -52,7 +56,7 @@ def test_dfgls_structure(self): self.assertIn("nobs", var_result) def test_dfgls_values(self): - result = DickeyFullerGLS(self.vm_dataset) + result, _ = DickeyFullerGLS(self.vm_dataset) results_dict = {item["Variable"]: item for item in result["DFGLS Test Results"]} # Stationary series should have lower p-value diff 
--git a/tests/unit_tests/data_validation/test_FeatureTargetCorrelationPlot.py b/tests/unit_tests/data_validation/test_FeatureTargetCorrelationPlot.py index 40d7e9ebf..119e6f8aa 100644 --- a/tests/unit_tests/data_validation/test_FeatureTargetCorrelationPlot.py +++ b/tests/unit_tests/data_validation/test_FeatureTargetCorrelationPlot.py @@ -27,12 +27,21 @@ def setUp(self): __log=False, ) - def test_returns_plotly_figure(self): + def test_returns_plotly_figure_and_raw_data(self): # Run the function result = FeatureTargetCorrelationPlot(self.vm_dataset) - # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + # Check if result is a tuple of Plotly Figure and RawData + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 2) + + fig, raw_data = result + + # Check if fig is a Plotly Figure + self.assertIsInstance(fig, go.Figure) # Check if the figure has data (at least one trace) - self.assertTrue(len(result.data) > 0) + self.assertTrue(len(fig.data) > 0) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, vm.RawData) diff --git a/tests/unit_tests/data_validation/test_HighCardinality.py b/tests/unit_tests/data_validation/test_HighCardinality.py index 9ebf0a66b..f8c68a26b 100644 --- a/tests/unit_tests/data_validation/test_HighCardinality.py +++ b/tests/unit_tests/data_validation/test_HighCardinality.py @@ -23,11 +23,12 @@ def setUp(self): ) def test_cardinality_structure(self): - results, all_passed = HighCardinality(self.vm_dataset) + results, all_passed, raw_data = HighCardinality(self.vm_dataset) # Check basic structure self.assertIsInstance(results, list) self.assertIsInstance(all_passed, bool) + self.assertIsInstance(raw_data, vm.RawData) # Check that results include both columns column_names = [result["Column"] for result in results] @@ -35,7 +36,7 @@ def test_cardinality_structure(self): self.assertIn("high_cardinality", column_names) def test_cardinality_values(self): - results, _ = HighCardinality(self.vm_dataset) + results, _, _ = HighCardinality(self.vm_dataset) # Convert results to dictionary for easier testing results_dict = {result["Column"]: result for result in results} @@ -60,13 +61,13 @@ def test_cardinality_values(self): def test_all_passed_flag(self): # Default thresholds should result in not all passing - _, all_passed_default = HighCardinality(self.vm_dataset) + _, all_passed_default, _ = HighCardinality(self.vm_dataset) # Only test the default case self.assertFalse(all_passed_default) def test_numeric_columns_ignored(self): - results, _ = HighCardinality(self.vm_dataset) + results, _, _ = HighCardinality(self.vm_dataset) column_names = [result["Column"] for result in results] # Numeric column should not be in results diff --git a/tests/unit_tests/data_validation/test_HighPearsonCorrelation.py b/tests/unit_tests/data_validation/test_HighPearsonCorrelation.py index d59a29864..925d5cc39 100644 --- a/tests/unit_tests/data_validation/test_HighPearsonCorrelation.py +++ b/tests/unit_tests/data_validation/test_HighPearsonCorrelation.py @@ -45,11 +45,12 @@ def setUp(self): ) def test_correlation_structure(self): - results, all_passed = HighPearsonCorrelation(self.vm_dataset) + results, all_passed, raw_data = HighPearsonCorrelation(self.vm_dataset) # Check basic structure self.assertIsInstance(results, list) self.assertIsInstance(all_passed, bool) + self.assertIsInstance(raw_data, vm.RawData) # Check result structure for result in results: @@ -58,7 +59,7 @@ def test_correlation_structure(self): 
self.assertIn("Pass/Fail", result) def test_correlation_values(self): - results, _ = HighPearsonCorrelation(self.vm_dataset, max_threshold=0.5) + results, _, _ = HighPearsonCorrelation(self.vm_dataset, max_threshold=0.5) # First result should be the perfect correlation perfect_corr = results[0] @@ -78,7 +79,7 @@ def test_correlation_values(self): self.assertTrue(moderate_found) def test_categorical_exclusion(self): - results, _ = HighPearsonCorrelation(self.vm_dataset) + results, _, _ = HighPearsonCorrelation(self.vm_dataset) # Verify categorical column is not in results for result in results: diff --git a/tests/unit_tests/data_validation/test_IQROutliersBarPlot.py b/tests/unit_tests/data_validation/test_IQROutliersBarPlot.py index fc807cdd0..8bf4093bf 100644 --- a/tests/unit_tests/data_validation/test_IQROutliersBarPlot.py +++ b/tests/unit_tests/data_validation/test_IQROutliersBarPlot.py @@ -4,6 +4,7 @@ import validmind as vm from validmind.tests.data_validation.IQROutliersBarPlot import IQROutliersBarPlot import plotly.graph_objects as go +from validmind import RawData class TestIQROutliersBarPlot(unittest.TestCase): @@ -38,13 +39,18 @@ def setUp(self): ) def test_plot_structure(self): - figures = IQROutliersBarPlot(self.vm_dataset) + results = IQROutliersBarPlot(self.vm_dataset) # Check return type - self.assertIsInstance(figures, tuple) + self.assertIsInstance(results, tuple) + + raw_data = results[-1] + + # Check raw data + self.assertIsInstance(raw_data, RawData) # Check each figure - for fig in figures: + for fig in results[:-1]: self.assertIsInstance(fig, go.Figure) # Check figure components @@ -52,14 +58,15 @@ def test_plot_structure(self): self.assertIsInstance(fig.data[0], go.Bar) def test_plot_data(self): - figures = IQROutliersBarPlot(self.vm_dataset) + results = IQROutliersBarPlot(self.vm_dataset) # Should have at least one figure (for the column with outliers) - self.assertGreater(len(figures), 0) + self.assertGreater(len(results), 1) # Find the figure for 'with_outliers' column outliers_fig = next( - (fig for fig in figures if fig.layout.title.text == "with_outliers"), None + (fig for fig in results[:-1] if fig.layout.title.text == "with_outliers"), + None, ) self.assertIsNotNone(outliers_fig) @@ -69,8 +76,8 @@ def test_plot_data(self): self.assertGreater(sum(bar_data), 0) # Should have some outliers def test_binary_exclusion(self): - figures = IQROutliersBarPlot(self.vm_dataset) + results = IQROutliersBarPlot(self.vm_dataset) - # Check that binary column is not included - figure_titles = [fig.layout.title.text for fig in figures] + # Check that binary column is not included in figures + figure_titles = [fig.layout.title.text for fig in results[:-1]] self.assertNotIn("binary", figure_titles) diff --git a/tests/unit_tests/data_validation/test_IsolationForestOutliers.py b/tests/unit_tests/data_validation/test_IsolationForestOutliers.py index 38bdc53c9..9d67cb7a6 100644 --- a/tests/unit_tests/data_validation/test_IsolationForestOutliers.py +++ b/tests/unit_tests/data_validation/test_IsolationForestOutliers.py @@ -6,6 +6,7 @@ IsolationForestOutliers, ) import matplotlib.pyplot as plt +from validmind import RawData class TestIsolationForestOutliers(unittest.TestCase): @@ -28,10 +29,13 @@ def setUp(self): ) def test_outliers_detection(self): - figures = IsolationForestOutliers(self.vm_dataset, contamination=0.1) + result = IsolationForestOutliers(self.vm_dataset, contamination=0.1) # Check return type - self.assertIsInstance(figures, tuple) + self.assertIsInstance(result, tuple) 
+ + # Separate figures and raw data + *figures, raw_data = result # Check that at least one figure is returned self.assertGreater(len(figures), 0) @@ -40,6 +44,9 @@ def test_outliers_detection(self): for fig in figures: self.assertIsInstance(fig, plt.Figure) + # Check raw data + self.assertIsInstance(raw_data, RawData) + def test_feature_columns_validation(self): # Test with valid feature columns try: diff --git a/tests/unit_tests/data_validation/test_JarqueBera.py b/tests/unit_tests/data_validation/test_JarqueBera.py index 734b69b6d..6bf29a3c1 100644 --- a/tests/unit_tests/data_validation/test_JarqueBera.py +++ b/tests/unit_tests/data_validation/test_JarqueBera.py @@ -1,11 +1,11 @@ import unittest import pandas as pd import validmind as vm -from validmind.tests.data_validation.JarqueBera import JarqueBera +from validmind.tests.data_validation.JarqueBera import JarqueBera, RawData class TestJarqueBera(unittest.TestCase): - def test_returns_dataframe_with_expected_shape(self): + def test_returns_dataframe_and_rawdata(self): # Create a simple dataset with numeric columns df = pd.DataFrame( { @@ -29,7 +29,7 @@ def test_returns_dataframe_with_expected_shape(self): ) # Run the function - result = JarqueBera(vm_dataset) + result, raw_data = JarqueBera(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -40,3 +40,6 @@ def test_returns_dataframe_with_expected_shape(self): # Check if the DataFrame has the expected number of rows (one for each numeric feature) self.assertEqual(len(result), len(vm_dataset.feature_columns_numeric)) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_KPSS.py b/tests/unit_tests/data_validation/test_KPSS.py index 8d6827350..6f4e7b1c6 100644 --- a/tests/unit_tests/data_validation/test_KPSS.py +++ b/tests/unit_tests/data_validation/test_KPSS.py @@ -3,6 +3,7 @@ import numpy as np import validmind as vm from validmind.tests.data_validation.KPSS import KPSS +from validmind import RawData class TestKPSS(unittest.TestCase): @@ -33,7 +34,7 @@ def setUp(self): ) def test_kpss_structure(self): - result = KPSS(self.vm_dataset) + result, raw_data = KPSS(self.vm_dataset) # Check basic structure self.assertIsInstance(result, dict) @@ -50,8 +51,11 @@ def test_kpss_structure(self): self.assertIn("usedlag", column_result) self.assertIn("critical_values", column_result) + # Check raw data instance + self.assertIsInstance(raw_data, RawData) + def test_kpss_results(self): - result = KPSS(self.vm_dataset) + result, _ = KPSS(self.vm_dataset) kpss_results = result["KPSS Test Results"] # Get results for each series @@ -74,7 +78,7 @@ def test_kpss_results(self): self.assertLess(stationary_result["stat"], non_stationary_result["stat"]) def test_critical_values(self): - result = KPSS(self.vm_dataset) + result, _ = KPSS(self.vm_dataset) kpss_results = result["KPSS Test Results"] for column_result in kpss_results: diff --git a/tests/unit_tests/data_validation/test_LJungBox.py b/tests/unit_tests/data_validation/test_LJungBox.py index 3880dcf3d..974ec64f4 100644 --- a/tests/unit_tests/data_validation/test_LJungBox.py +++ b/tests/unit_tests/data_validation/test_LJungBox.py @@ -2,6 +2,7 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.LJungBox import LJungBox +from validmind import RawData class TestLJungBox(unittest.TestCase): @@ -22,7 +23,7 @@ def test_returns_dataframe_with_expected_shape(self): ) # Run the function - result = 
LJungBox(vm_dataset) + result, raw_data = LJungBox(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -33,3 +34,6 @@ def test_returns_dataframe_with_expected_shape(self): # Check if the DataFrame has the expected number of rows (one for each column) self.assertEqual(len(result), len(df.columns)) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_LaggedCorrelationHeatmap.py b/tests/unit_tests/data_validation/test_LaggedCorrelationHeatmap.py index 8f59f537a..e57114ca0 100644 --- a/tests/unit_tests/data_validation/test_LaggedCorrelationHeatmap.py +++ b/tests/unit_tests/data_validation/test_LaggedCorrelationHeatmap.py @@ -6,6 +6,7 @@ LaggedCorrelationHeatmap, ) import plotly.graph_objects as go +from validmind import RawData class TestLaggedCorrelationHeatmap(unittest.TestCase): @@ -32,9 +33,9 @@ def setUp(self): ) def test_heatmap_structure(self): - fig = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) + fig, raw_data = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) - # Check return type + # Check return type for fig self.assertIsInstance(fig, go.Figure) # Check figure has data @@ -47,7 +48,7 @@ def test_heatmap_structure(self): self.assertEqual(fig.layout.xaxis.title.text, "Lags") def test_correlation_values(self): - fig = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) + fig, _ = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) # Get correlation values from heatmap heatmap_data = fig.data[0] @@ -68,15 +69,15 @@ def test_correlation_values(self): def test_num_lags_parameter(self): # Test with different number of lags - fig_small = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=3) - fig_large = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=8) + fig_small, _ = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=3) + fig_large, _ = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=8) # Check dimensions match num_lags parameter self.assertEqual(len(fig_small.data[0].x), 4) # num_lags + 1 self.assertEqual(len(fig_large.data[0].x), 9) # num_lags + 1 def test_correlation_pattern(self): - fig = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) + fig, _ = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) # Get correlation values for feature1 correlations = fig.data[0].z[0] @@ -84,3 +85,9 @@ def test_correlation_pattern(self): # Check that lag 2 has higher correlation than lag 0 # (since we created target with 2-period lag) self.assertGreater(abs(correlations[2]), abs(correlations[0])) + + def test_raw_data_output(self): + _, raw_data = LaggedCorrelationHeatmap(self.vm_dataset, num_lags=5) + + # Check that raw_data is instance of RawData + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_MissingValuesBarPlot.py b/tests/unit_tests/data_validation/test_MissingValuesBarPlot.py index 1905cdf52..bd2f7aa82 100644 --- a/tests/unit_tests/data_validation/test_MissingValuesBarPlot.py +++ b/tests/unit_tests/data_validation/test_MissingValuesBarPlot.py @@ -4,6 +4,7 @@ import validmind as vm from validmind.tests.data_validation.MissingValuesBarPlot import MissingValuesBarPlot import plotly.graph_objects as go +from validmind import RawData class TestMissingValuesBarPlot(unittest.TestCase): @@ -30,13 +31,16 @@ def setUp(self): ) def test_figure_structure(self): - fig = MissingValuesBarPlot(self.vm_dataset, threshold=80) + fig, raw_data = MissingValuesBarPlot(self.vm_dataset, threshold=80) - # Check 
return type + # Check figure return type self.assertIsInstance(fig, go.Figure) + # Check raw data return type + self.assertIsInstance(raw_data, RawData) + def test_data_traces(self): - fig = MissingValuesBarPlot(self.vm_dataset, threshold=80) + fig, _ = MissingValuesBarPlot(self.vm_dataset, threshold=80) # Should have 3 traces: below threshold, above threshold, and threshold line self.assertEqual(len(fig.data), 3) diff --git a/tests/unit_tests/data_validation/test_PearsonCorrelationMatrix.py b/tests/unit_tests/data_validation/test_PearsonCorrelationMatrix.py index c2ac0c78c..ddd53aa75 100644 --- a/tests/unit_tests/data_validation/test_PearsonCorrelationMatrix.py +++ b/tests/unit_tests/data_validation/test_PearsonCorrelationMatrix.py @@ -31,9 +31,9 @@ def setUp(self): __log=False, ) - def test_returns_plotly_figure(self): + def test_returns_plotly_figure_and_raw_data(self): # Run the function - result = PearsonCorrelationMatrix(self.vm_dataset) + result, raw_data = PearsonCorrelationMatrix(self.vm_dataset) # Check if result is a Plotly Figure self.assertIsInstance(result, go.Figure) @@ -45,3 +45,6 @@ def test_returns_plotly_figure(self): # Check if the heatmap has the correct dimensions (3x3 for numeric columns) self.assertEqual(len(result.data[0].x), 3) # Number of numeric columns self.assertEqual(len(result.data[0].y), 3) # Number of numeric columns + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, vm.RawData) diff --git a/tests/unit_tests/data_validation/test_RollingStatsPlot.py b/tests/unit_tests/data_validation/test_RollingStatsPlot.py index 0d6aa4034..6d7b3ff6e 100644 --- a/tests/unit_tests/data_validation/test_RollingStatsPlot.py +++ b/tests/unit_tests/data_validation/test_RollingStatsPlot.py @@ -3,6 +3,7 @@ import validmind as vm import matplotlib.pyplot as plt from validmind.tests.data_validation.RollingStatsPlot import RollingStatsPlot +from validmind import RawData class TestRollingStatsPlot(unittest.TestCase): @@ -30,15 +31,18 @@ def setUp(self): ) def test_rolling_stats_plot(self): - figures = RollingStatsPlot(self.vm_dataset, window_size=10) + outputs = RollingStatsPlot(self.vm_dataset, window_size=10) - # Check that we get the correct number of figures (one per feature) - self.assertEqual(len(figures), 2) + # Check that we get the correct number of figures (one per feature) plus raw data + self.assertEqual(len(outputs), 3) - # Check that outputs are matplotlib figures - for fig in figures: + # Check that first outputs are matplotlib figures + for fig in outputs[:-1]: self.assertIsInstance(fig, plt.Figure) + # Check that the last output is raw data + self.assertIsInstance(outputs[-1], RawData) + # Clean up plt.close("all") diff --git a/tests/unit_tests/data_validation/test_RunsTest.py b/tests/unit_tests/data_validation/test_RunsTest.py index 769790403..215bf2066 100644 --- a/tests/unit_tests/data_validation/test_RunsTest.py +++ b/tests/unit_tests/data_validation/test_RunsTest.py @@ -5,7 +5,7 @@ class TestRunsTest(unittest.TestCase): - def test_returns_dataframe_with_expected_shape(self): + def test_returns_dataframe_and_raw_data(self): # Create a simple dataset with numeric columns df = pd.DataFrame( { @@ -32,7 +32,7 @@ def test_returns_dataframe_with_expected_shape(self): ) # Run the function - result = RunsTest(vm_dataset) + result, raw_data = RunsTest(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -43,3 +43,6 @@ def test_returns_dataframe_with_expected_shape(self): # Check if the DataFrame has 
the expected number of rows (one for each numeric feature) self.assertEqual(len(result), len(vm_dataset.feature_columns_numeric)) + + # Check if raw_data is instance of RawData + self.assertIsInstance(raw_data, vm.RawData) diff --git a/tests/unit_tests/data_validation/test_ScatterPlot.py b/tests/unit_tests/data_validation/test_ScatterPlot.py index 1b9989587..e171bef8c 100644 --- a/tests/unit_tests/data_validation/test_ScatterPlot.py +++ b/tests/unit_tests/data_validation/test_ScatterPlot.py @@ -30,18 +30,21 @@ def setUp(self): __log=False, ) - def test_returns_tuple_of_figures(self): + def test_returns_tuple_of_figures_and_raw_data(self): # Run the function result = ScatterPlot(self.vm_dataset) # Check if result is a tuple self.assertIsInstance(result, tuple) - # Check if the tuple contains exactly one figure - self.assertEqual(len(result), 1) + # Check if the tuple contains exactly two elements + self.assertEqual(len(result), 2) - # Check if the figure is a matplotlib Figure + # Check if the first element is a matplotlib Figure self.assertIsInstance(result[0], plt.Figure) + # Check if the second element is an instance of RawData + self.assertIsInstance(result[1], vm.RawData) + # Check if all figures are properly closed self.assertEqual(len(plt.get_fignums()), 0) diff --git a/tests/unit_tests/data_validation/test_SeasonalDecompose.py b/tests/unit_tests/data_validation/test_SeasonalDecompose.py index ca234eb95..3c11e6fd1 100644 --- a/tests/unit_tests/data_validation/test_SeasonalDecompose.py +++ b/tests/unit_tests/data_validation/test_SeasonalDecompose.py @@ -3,6 +3,7 @@ import numpy as np import validmind as vm import plotly.graph_objects as go +from validmind import RawData from validmind.tests.data_validation.SeasonalDecompose import SeasonalDecompose from validmind.errors import SkipTestError @@ -41,28 +42,36 @@ def setUp(self): ) def test_seasonal_decompose(self): - figures = SeasonalDecompose(self.vm_dataset) + result = SeasonalDecompose(self.vm_dataset) - # Check that we get the correct number of figures (one per feature) - self.assertIsInstance(figures, tuple) - self.assertEqual(len(figures), 2) + # Check that we get the correct number of figures (one per feature + RawData) + self.assertIsInstance(result, tuple) + self.assertEqual(len(result), 3) # Check that outputs are plotly figures with correct subplots + figures = result[:-1] # all but the last item, which is RawData for fig in figures: self.assertIsInstance(fig, go.Figure) # Should have 6 subplots: Observed, Trend, Seasonal, Residuals, # Histogram, and Q-Q plot self.assertEqual(len(fig.data), 7) # 6 plots + 1 QQ line + # Check the last element in the returned result is a RawData instance + self.assertIsInstance(result[-1], RawData) + def test_seasonal_decompose_with_nan(self): # Should still work with NaN values - figures = SeasonalDecompose(self.vm_dataset_with_nan) - self.assertEqual(len(figures), 2) + result = SeasonalDecompose(self.vm_dataset_with_nan) + + # Expect 2 plots and 1 RawData object + self.assertEqual(len(result), 3) def test_seasonal_decompose_models(self): # Test additive model (should work with any data) - figures_add = SeasonalDecompose(self.vm_dataset, seasonal_model="additive") - self.assertEqual(len(figures_add), 2) + result_add = SeasonalDecompose(self.vm_dataset, seasonal_model="additive") + + # Expect 2 plots and 1 RawData object + self.assertEqual(len(result_add), 3) # Test multiplicative model (should raise ValueError for data with zero/negative values) with self.assertRaises(ValueError) as context: 
diff --git a/tests/unit_tests/data_validation/test_ShapiroWilk.py b/tests/unit_tests/data_validation/test_ShapiroWilk.py index d4fcb502b..b06da95e7 100644 --- a/tests/unit_tests/data_validation/test_ShapiroWilk.py +++ b/tests/unit_tests/data_validation/test_ShapiroWilk.py @@ -3,6 +3,7 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.ShapiroWilk import ShapiroWilk +from validmind import RawData class TestShapiroWilk(unittest.TestCase): @@ -28,26 +29,33 @@ def setUp(self): __log=False, ) - def test_returns_dataframe_with_expected_shape(self): + def test_returns_dataframe_and_rawdata(self): # Run the function - result = ShapiroWilk(self.vm_dataset) + result_df, result_rawdata = ShapiroWilk(self.vm_dataset) - # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + # Check if result_df is a DataFrame + self.assertIsInstance(result_df, pd.DataFrame) # Check if the DataFrame has the expected columns expected_columns = ["column", "stat", "pvalue"] - self.assertListEqual(list(result.columns), expected_columns) + self.assertListEqual(list(result_df.columns), expected_columns) # Check if the DataFrame has the expected number of rows (one for each numeric feature) - self.assertEqual(len(result), len(self.vm_dataset.feature_columns_numeric)) + self.assertEqual(len(result_df), len(self.vm_dataset.feature_columns_numeric)) + + # Check if result_rawdata is an instance of RawData + self.assertIsInstance(result_rawdata, RawData) def test_handles_different_distributions(self): # Run the function - result = ShapiroWilk(self.vm_dataset) + result_df, _ = ShapiroWilk(self.vm_dataset) # The normal distribution should have a higher p-value than the exponential distribution - normal_pvalue = result[result["column"] == "normal_dist"]["pvalue"].iloc[0] - exp_pvalue = result[result["column"] == "exponential_dist"]["pvalue"].iloc[0] + normal_pvalue = result_df[result_df["column"] == "normal_dist"]["pvalue"].iloc[ + 0 + ] + exp_pvalue = result_df[result_df["column"] == "exponential_dist"][ + "pvalue" + ].iloc[0] self.assertGreater(normal_pvalue, exp_pvalue) diff --git a/tests/unit_tests/data_validation/test_Skewness.py b/tests/unit_tests/data_validation/test_Skewness.py index 3e7344220..b366ebdc2 100644 --- a/tests/unit_tests/data_validation/test_Skewness.py +++ b/tests/unit_tests/data_validation/test_Skewness.py @@ -2,6 +2,7 @@ import pandas as pd import numpy as np import validmind as vm +from validmind import RawData from validmind.tests.data_validation.Skewness import Skewness @@ -37,11 +38,12 @@ def setUp(self): def test_skewness_threshold(self): # Test with default threshold (1) - results, passed = Skewness(self.vm_dataset) + results, passed, raw_data = Skewness(self.vm_dataset) # Check return types self.assertIsInstance(results, dict) self.assertIn(passed, [True, False]) + self.assertIsInstance(raw_data, RawData) # Check results structure results_table = results["Skewness Results for Dataset"] @@ -60,7 +62,7 @@ def test_skewness_threshold(self): def test_custom_threshold(self): # Test with very high threshold (all should pass) - results, passed = Skewness(self.vm_dataset, max_threshold=10) + results, passed, raw_data = Skewness(self.vm_dataset, max_threshold=10) results_table = results["Skewness Results for Dataset"] # All columns should pass with high threshold diff --git a/tests/unit_tests/data_validation/test_SpreadPlot.py b/tests/unit_tests/data_validation/test_SpreadPlot.py index ce4b51397..8c8afdc2d 100644 --- 
a/tests/unit_tests/data_validation/test_SpreadPlot.py +++ b/tests/unit_tests/data_validation/test_SpreadPlot.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt import validmind as vm +from validmind import RawData from validmind.errors import SkipTestError from validmind.tests.data_validation.SpreadPlot import SpreadPlot @@ -33,7 +34,13 @@ def setUp(self): ) def test_spread_plot(self): - figures = SpreadPlot(self.vm_dataset) + result = SpreadPlot(self.vm_dataset) + + # The last item should be an instance of RawData + self.assertIsInstance(result[-1], RawData) + + # Collect all figures except the last item + figures = result[:-1] # Check that we get the correct number of figures (one per feature pair) self.assertEqual(len(figures), 1) # Only one pair (A-B) for two features diff --git a/tests/unit_tests/data_validation/test_TabularCategoricalBarPlots.py b/tests/unit_tests/data_validation/test_TabularCategoricalBarPlots.py index dcbc80f92..7935eedc5 100644 --- a/tests/unit_tests/data_validation/test_TabularCategoricalBarPlots.py +++ b/tests/unit_tests/data_validation/test_TabularCategoricalBarPlots.py @@ -2,6 +2,7 @@ import pandas as pd import validmind as vm import plotly.graph_objs as go +from validmind import RawData from validmind.errors import SkipTestError from validmind.tests.data_validation.TabularCategoricalBarPlots import ( TabularCategoricalBarPlots, @@ -39,6 +40,12 @@ def setUp(self): def test_categorical_bar_plots(self): figures = TabularCategoricalBarPlots(self.vm_dataset) + # Check that the last element is an instance of RawData + self.assertIsInstance(figures[-1], RawData) + + # Remove the raw data before checking figures + figures = figures[:-1] + # Check that we get the correct number of figures (one per categorical column) self.assertIsInstance(figures, tuple) self.assertEqual(len(figures), 2) # Should have 2 figures for cat1 and cat2 diff --git a/tests/unit_tests/data_validation/test_TabularDateTimeHistograms.py b/tests/unit_tests/data_validation/test_TabularDateTimeHistograms.py index 38925b23f..5907b69af 100644 --- a/tests/unit_tests/data_validation/test_TabularDateTimeHistograms.py +++ b/tests/unit_tests/data_validation/test_TabularDateTimeHistograms.py @@ -29,11 +29,14 @@ def setUp(self): ) def test_datetime_histograms(self): - figure = TabularDateTimeHistograms(self.vm_dataset) + figure, raw_data = TabularDateTimeHistograms(self.vm_dataset) # Check that output is a plotly figure self.assertIsInstance(figure, go.Figure) + # Check that raw data is an instance of RawData + self.assertIsInstance(raw_data, vm.RawData) + def test_no_datetime_index(self): # Should raise SkipTestError when no datetime index present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/data_validation/test_TargetRateBarPlots.py b/tests/unit_tests/data_validation/test_TargetRateBarPlots.py index 702048b64..ef013448b 100644 --- a/tests/unit_tests/data_validation/test_TargetRateBarPlots.py +++ b/tests/unit_tests/data_validation/test_TargetRateBarPlots.py @@ -4,6 +4,7 @@ import plotly.graph_objs as go from validmind.errors import SkipTestError from validmind.tests.data_validation.TargetRateBarPlots import TargetRateBarPlots +from validmind import RawData class TestTargetRateBarPlots(unittest.TestCase): @@ -65,14 +66,19 @@ def setUp(self): def test_target_rate_bar_plots(self): figures = TargetRateBarPlots(self.vm_dataset) - # Check that we get the correct number of figures (one per categorical column) + # Check that we get the correct number of figures and raw data (one per 
categorical column and one RawData) self.assertIsInstance(figures, tuple) - self.assertEqual(len(figures), 2) # Should have 2 figures for cat1 and cat2 + self.assertEqual( + len(figures), 3 + ) # Should have 2 figures for cat1 and cat2, and 1 RawData - # Check that outputs are plotly figures - for fig in figures: + # Check that outputs are plotly figures for the first two results + for fig in figures[:2]: self.assertIsInstance(fig, go.Figure) + # Check that the last output is RawData + self.assertIsInstance(figures[2], RawData) + def test_no_categorical_columns(self): # Should raise SkipTestError when no categorical columns present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/data_validation/test_TimeSeriesFrequency.py b/tests/unit_tests/data_validation/test_TimeSeriesFrequency.py index 62d74e311..c49b3f234 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesFrequency.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesFrequency.py @@ -4,6 +4,7 @@ import plotly.graph_objs as go from validmind.errors import SkipTestError from validmind.tests.data_validation.TimeSeriesFrequency import TimeSeriesFrequency +from validmind import RawData class TestTimeSeriesFrequency(unittest.TestCase): @@ -31,12 +32,13 @@ def setUp(self): ) def test_time_series_frequency(self): - frequencies, figure, passed = TimeSeriesFrequency(self.vm_dataset) + frequencies, figure, passed, raw_data = TimeSeriesFrequency(self.vm_dataset) # Check return types self.assertIsInstance(frequencies, list) self.assertIsInstance(figure, go.Figure) self.assertIsInstance(passed, bool) + self.assertIsInstance(raw_data, RawData) # Check the new raw_data type # Check frequencies structure self.assertEqual(len(frequencies), 2) # One entry per feature diff --git a/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py b/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py index 5155e406d..245b27db7 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py @@ -4,6 +4,7 @@ import plotly.graph_objects as go import validmind as vm from validmind.tests.data_validation.TimeSeriesHistogram import TimeSeriesHistogram +from validmind import RawData class TestTimeSeriesHistogram(unittest.TestCase): @@ -43,19 +44,22 @@ def setUp(self): __log=False, ) - def test_returns_tuple_of_figures(self): + def test_returns_tuple_of_figures_and_raw_data(self): # Run the function result = TimeSeriesHistogram(self.vm_dataset) # Check if result is a tuple self.assertIsInstance(result, tuple) - # Check if all elements in the tuple are Plotly Figures - for fig in result: + # Check if all elements except the last one in the tuple are Plotly Figures + for fig in result[:-1]: self.assertIsInstance(fig, go.Figure) - # Should have one histogram per column - self.assertEqual(len(result), len(self.df.columns)) + # The last element should be RawData + self.assertIsInstance(result[-1], RawData) + + # Should have one histogram per column plus one RawData object + self.assertEqual(len(result), len(self.df.columns) + 1) def test_histogram_properties(self): result = TimeSeriesHistogram(self.vm_dataset) diff --git a/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py b/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py index 2e71c16a2..a9bea6dd5 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py @@ -2,6 +2,7 @@ import pandas as pd import validmind 
as vm import plotly.graph_objs as go +from validmind import RawData from validmind.errors import SkipTestError from validmind.tests.data_validation.TimeSeriesLinePlot import TimeSeriesLinePlot @@ -33,14 +34,19 @@ def setUp(self): def test_time_series_line_plot(self): figures = TimeSeriesLinePlot(self.vm_dataset) - # Check that we get the correct number of figures (one per feature) + # Check that we get the correct number of figures plus raw data (one per feature + RawData) self.assertIsInstance(figures, tuple) - self.assertEqual(len(figures), 2) # Should have 2 figures for A and B + self.assertEqual( + len(figures), 3 + ) # Should have 2 figures for A and B and 1 RawData - # Check that outputs are plotly figures - for fig in figures: + # Check that the first two outputs are plotly figures + for fig in figures[:2]: self.assertIsInstance(fig, go.Figure) + # Check that the last output is RawData + self.assertIsInstance(figures[-1], RawData) + def test_no_datetime_index(self): # Should raise SkipTestError when no datetime index present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/data_validation/test_TimeSeriesMissingValues.py b/tests/unit_tests/data_validation/test_TimeSeriesMissingValues.py index 1fbb6db1d..3ba92ef24 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesMissingValues.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesMissingValues.py @@ -6,6 +6,7 @@ from validmind.tests.data_validation.TimeSeriesMissingValues import ( TimeSeriesMissingValues, ) +from validmind import RawData class TestTimeSeriesMissingValues(unittest.TestCase): @@ -39,13 +40,16 @@ def setUp(self): ) def test_time_series_missing_values(self): - results, barplot, heatmap, passed = TimeSeriesMissingValues(self.vm_dataset) + results, barplot, heatmap, passed, raw_data = TimeSeriesMissingValues( + self.vm_dataset + ) # Check return types self.assertIsInstance(results, list) self.assertIsInstance(barplot, go.Figure) self.assertIsInstance(heatmap, go.Figure) self.assertIsInstance(passed, bool) + self.assertIsInstance(raw_data, RawData) # Check results structure self.assertEqual(len(results), 2) # One entry per feature diff --git a/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py b/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py index 193653a91..dc3953f22 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py @@ -5,6 +5,7 @@ import plotly.graph_objs as go from validmind.errors import SkipTestError from validmind.tests.data_validation.TimeSeriesOutliers import TimeSeriesOutliers +from validmind import RawData class TestTimeSeriesOutliers(unittest.TestCase): @@ -36,12 +37,13 @@ def setUp(self): ) def test_time_series_outliers(self): - outlier_df, figures, passed = TimeSeriesOutliers(self.vm_dataset) + outlier_df, figures, passed, raw_data = TimeSeriesOutliers(self.vm_dataset) # Check return types self.assertIsInstance(outlier_df, pd.DataFrame) self.assertIsInstance(figures, list) self.assertIsInstance(passed, bool) + self.assertIsInstance(raw_data, RawData) # Check that we have the expected number of figures (one per feature) self.assertEqual(len(figures), 2) diff --git a/tests/unit_tests/data_validation/test_WOEBinPlots.py b/tests/unit_tests/data_validation/test_WOEBinPlots.py index 008db4b64..03a332aaf 100644 --- a/tests/unit_tests/data_validation/test_WOEBinPlots.py +++ b/tests/unit_tests/data_validation/test_WOEBinPlots.py @@ -4,6 +4,7 @@ import plotly.graph_objs as go 
from validmind.errors import SkipTestError from validmind.tests.data_validation.WOEBinPlots import WOEBinPlots +from validmind import RawData class TestWOEBinPlots(unittest.TestCase): @@ -43,20 +44,23 @@ def setUp(self): ) def test_woe_bin_plots(self): - figures = WOEBinPlots(self.vm_dataset) + results = WOEBinPlots(self.vm_dataset) # Check that we get the correct number of figures (one per feature column) - self.assertIsInstance(figures, tuple) + self.assertIsInstance(results, tuple) self.assertEqual( - len(figures), 3 - ) # Should have 3 figures: cat1, cat2, and numeric + len(results), 4 + ) # Should have 3 figures: cat1, cat2, and numeric and a RawData object - # Check that outputs are plotly figures - for fig in figures: + # Check that outputs are plotly figures and the last one is RawData + for fig in results[:-1]: self.assertIsInstance(fig, go.Figure) + # Check that the last output is an instance of RawData + self.assertIsInstance(results[-1], RawData) + # Verify all feature columns have corresponding plots - titles = [fig.layout.title.text for fig in figures] + titles = [fig.layout.title.text for fig in results[:-1]] expected_features = ["cat1", "cat2", "numeric"] self.assertTrue( all(any(feat in title for feat in expected_features) for title in titles) diff --git a/tests/unit_tests/data_validation/test_WOEBinTable.py b/tests/unit_tests/data_validation/test_WOEBinTable.py index b55af02f0..089d81b33 100644 --- a/tests/unit_tests/data_validation/test_WOEBinTable.py +++ b/tests/unit_tests/data_validation/test_WOEBinTable.py @@ -2,7 +2,7 @@ import pandas as pd import validmind as vm from validmind.errors import SkipTestError -from validmind.tests.data_validation.WOEBinTable import WOEBinTable +from validmind.tests.data_validation.WOEBinTable import WOEBinTable, RawData class TestWOEBinTable(unittest.TestCase): @@ -44,7 +44,7 @@ def setUp(self): ) def test_woe_bin_table(self): - result = WOEBinTable(self.vm_dataset) + result, raw_data = WOEBinTable(self.vm_dataset) # Check the table structure table = result["Weight of Evidence (WoE) and Information Value (IV)"] @@ -62,6 +62,9 @@ def test_woe_bin_table(self): expected_features = ["cat1", "cat2", "numeric"] self.assertTrue(all(feat in unique_variables for feat in expected_features)) + # Check that raw data is an instance of RawData + self.assertIsInstance(raw_data, RawData) + def test_no_target(self): # Should raise SkipTestError when no target column present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py b/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py index 97e7716c7..c6e9c588c 100644 --- a/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py +++ b/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py @@ -4,6 +4,7 @@ import validmind as vm from validmind.errors import SkipTestError from validmind.tests.data_validation.ZivotAndrewsArch import ZivotAndrewsArch +from validmind import RawData class TestZivotAndrewsArch(unittest.TestCase): @@ -39,7 +40,7 @@ def setUp(self): ) def test_zivot_andrews(self): - result = ZivotAndrewsArch(self.vm_dataset) + result, raw_data = ZivotAndrewsArch(self.vm_dataset) # Check return type and structure self.assertIsInstance(result, dict) @@ -56,6 +57,9 @@ def test_zivot_andrews(self): for field in required_fields: self.assertIn(field, value) + # Check raw data + self.assertIsInstance(raw_data, RawData) + def test_no_datetime_index(self): # Should raise SkipTestError when no datetime index present with 
self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/model_validation/ragas/test_AnswerCorrectness.py b/tests/unit_tests/model_validation/ragas/test_AnswerCorrectness.py index 7a5328fa2..5f9be8b6c 100644 --- a/tests/unit_tests/model_validation/ragas/test_AnswerCorrectness.py +++ b/tests/unit_tests/model_validation/ragas/test_AnswerCorrectness.py @@ -1,8 +1,6 @@ -# Load environment variables at the start of the test file import os import dotenv -# load openai api key dotenv.load_dotenv() if not "OPENAI_API_KEY" in os.environ: raise ValueError("OPENAI_API_KEY is not set") @@ -11,6 +9,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData from validmind.tests.model_validation.ragas.AnswerCorrectness import AnswerCorrectness @@ -18,7 +17,6 @@ class TestAnswerCorrectness(unittest.TestCase): def setUp(self): """Set up test fixtures before each test method.""" - # Create sample QA data self.df = pd.DataFrame( { "question": [ @@ -36,7 +34,6 @@ def setUp(self): } ) - # Initialize ValidMind dataset self.vm_dataset = vm.init_dataset( input_id="qa_dataset", dataset=self.df, @@ -52,20 +49,19 @@ def test_return_types(self): reference_column="ground_truth", ) - # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data - # Check dictionary structure self.assertIsInstance(result[0], dict) self.assertIn("Aggregate Scores", result[0]) self.assertIsInstance(result[0]["Aggregate Scores"], list) self.assertEqual(len(result[0]["Aggregate Scores"]), 1) - # Check figures self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + self.assertIsInstance(result[3], RawData) # Raw data + def test_aggregate_scores(self): """Test if aggregate scores have expected structure and values.""" result = AnswerCorrectness( @@ -77,7 +73,6 @@ def test_aggregate_scores(self): scores = result[0]["Aggregate Scores"][0] - # Check score keys expected_keys = [ "Mean Score", "Median Score", @@ -88,7 +83,6 @@ def test_aggregate_scores(self): ] self.assertListEqual(list(scores.keys()), expected_keys) - # Check score ranges self.assertTrue(0 <= scores["Mean Score"] <= 1) self.assertTrue(0 <= scores["Median Score"] <= 1) self.assertTrue(0 <= scores["Max Score"] <= 1) diff --git a/tests/unit_tests/model_validation/ragas/test_ContextEntityRecall.py b/tests/unit_tests/model_validation/ragas/test_ContextEntityRecall.py index 3d2a9f146..97f28f9c9 100644 --- a/tests/unit_tests/model_validation/ragas/test_ContextEntityRecall.py +++ b/tests/unit_tests/model_validation/ragas/test_ContextEntityRecall.py @@ -59,7 +59,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and RawData # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -71,6 +71,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], vm.RawData) # Raw data instance + def test_aggregate_scores(self): """Test if aggregate scores have expected structure and values.""" result = ContextEntityRecall( diff --git a/tests/unit_tests/model_validation/ragas/test_ContextPrecision.py b/tests/unit_tests/model_validation/ragas/test_ContextPrecision.py index 
ad719a8ca..fe5c82db8 100644 --- a/tests/unit_tests/model_validation/ragas/test_ContextPrecision.py +++ b/tests/unit_tests/model_validation/ragas/test_ContextPrecision.py @@ -67,7 +67,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and RawData # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -79,6 +79,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], vm.RawData) # Raw Data object + def test_precision_scores(self): """Test if precision scores reflect context ranking quality.""" result = ContextPrecision( diff --git a/tests/unit_tests/model_validation/ragas/test_ContextPrecisionWithoutReference.py b/tests/unit_tests/model_validation/ragas/test_ContextPrecisionWithoutReference.py index e863c28b7..6747b6baf 100644 --- a/tests/unit_tests/model_validation/ragas/test_ContextPrecisionWithoutReference.py +++ b/tests/unit_tests/model_validation/ragas/test_ContextPrecisionWithoutReference.py @@ -4,6 +4,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData # Load environment variables at the start of the test file dotenv.load_dotenv() @@ -69,7 +70,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -81,6 +82,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], RawData) + def test_precision_scores(self): """Test if precision scores reflect context relevance.""" result = ContextPrecisionWithoutReference( diff --git a/tests/unit_tests/model_validation/ragas/test_ContextRecall.py b/tests/unit_tests/model_validation/ragas/test_ContextRecall.py index 7e1f9b569..35a257b2a 100644 --- a/tests/unit_tests/model_validation/ragas/test_ContextRecall.py +++ b/tests/unit_tests/model_validation/ragas/test_ContextRecall.py @@ -4,6 +4,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData # Load environment variables at the start of the test file dotenv.load_dotenv() @@ -66,7 +67,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -78,6 +79,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], RawData) + def test_recall_scores(self): """Test if recall scores reflect coverage of reference information.""" result = ContextRecall( diff --git a/tests/unit_tests/model_validation/ragas/test_Faithfulness.py b/tests/unit_tests/model_validation/ragas/test_Faithfulness.py index 5b52aa0cf..d60ce2a55 100644 --- a/tests/unit_tests/model_validation/ragas/test_Faithfulness.py +++ b/tests/unit_tests/model_validation/ragas/test_Faithfulness.py @@ -4,6 +4,7 @@ 
import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData # Load environment variables at the start of the test file dotenv.load_dotenv() @@ -64,7 +65,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -76,6 +77,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], RawData) + def test_faithfulness_scores(self): """Test if faithfulness scores reflect response accuracy to contexts.""" result = Faithfulness( diff --git a/tests/unit_tests/model_validation/ragas/test_NoiseSensitivity.py b/tests/unit_tests/model_validation/ragas/test_NoiseSensitivity.py index ecd3d3fb8..ba08fc48c 100644 --- a/tests/unit_tests/model_validation/ragas/test_NoiseSensitivity.py +++ b/tests/unit_tests/model_validation/ragas/test_NoiseSensitivity.py @@ -4,6 +4,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData # Load environment variables at the start of the test file dotenv.load_dotenv() @@ -72,7 +73,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -84,6 +85,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], RawData) # Raw data check + def test_noise_sensitivity_scores(self): """Test if noise sensitivity scores reflect response quality.""" result = NoiseSensitivity( diff --git a/tests/unit_tests/model_validation/ragas/test_ResponseRelevancy.py b/tests/unit_tests/model_validation/ragas/test_ResponseRelevancy.py index 005cc82a2..f82d41001 100644 --- a/tests/unit_tests/model_validation/ragas/test_ResponseRelevancy.py +++ b/tests/unit_tests/model_validation/ragas/test_ResponseRelevancy.py @@ -61,7 +61,7 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -73,6 +73,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], vm.RawData) + def test_relevancy_scores(self): """Test if relevancy scores reflect response quality.""" result = ResponseRelevancy( diff --git a/tests/unit_tests/model_validation/ragas/test_SemanticSimilarity.py b/tests/unit_tests/model_validation/ragas/test_SemanticSimilarity.py index 5c0219726..a4ead5ed4 100644 --- a/tests/unit_tests/model_validation/ragas/test_SemanticSimilarity.py +++ b/tests/unit_tests/model_validation/ragas/test_SemanticSimilarity.py @@ -4,6 +4,7 @@ import pandas as pd import plotly.graph_objects as go import validmind as vm +from validmind import RawData # Load environment variables at the start of the test file dotenv.load_dotenv() @@ -47,7 +48,7 @@ def 
test_return_types(self): # Check return types self.assertIsInstance(result, tuple) - self.assertEqual(len(result), 3) # dict and 2 figures + self.assertEqual(len(result), 4) # dict, 2 figures, and raw data # Check dictionary structure self.assertIsInstance(result[0], dict) @@ -59,6 +60,9 @@ def test_return_types(self): self.assertIsInstance(result[1], go.Figure) # Histogram self.assertIsInstance(result[2], go.Figure) # Box plot + # Check raw data + self.assertIsInstance(result[3], RawData) # Raw data + def test_similarity_scores(self): """Test if similarity scores reflect semantic closeness.""" result = SemanticSimilarity( diff --git a/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py b/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py index c814e5307..8a2d71d02 100644 --- a/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py +++ b/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py @@ -3,6 +3,7 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm +from validmind import RawData from validmind.tests.model_validation.sklearn.FeatureImportance import FeatureImportance @@ -56,25 +57,28 @@ def setUp(self): __log=False, ) - def test_returns_dataframe(self): + def test_returns_dataframe_and_rawdata(self): # Run the function - result = FeatureImportance(self.vm_dataset, self.vm_model) + result_df, raw_data = FeatureImportance(self.vm_dataset, self.vm_model) - # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + # Check if result_df is a DataFrame + self.assertIsInstance(result_df, pd.DataFrame) # Check if DataFrame has expected columns expected_columns = ["Feature 1", "Feature 2", "Feature 3"] - self.assertTrue(all(col in result.columns for col in expected_columns)) + self.assertTrue(all(col in result_df.columns for col in expected_columns)) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) def test_feature_importance_ranking(self): # Run with all features - result = FeatureImportance(self.vm_dataset, self.vm_model, num_features=4) + result_df, _ = FeatureImportance(self.vm_dataset, self.vm_model, num_features=4) # Get feature names and scores features = [] for i in range(1, 5): - feature_info = result[f"Feature {i}"].iloc[0] + feature_info = result_df[f"Feature {i}"].iloc[0] feature_name = feature_info.split(";")[0].strip("[]") features.append(feature_name) @@ -87,21 +91,21 @@ def test_feature_importance_ranking(self): def test_num_features_parameter(self): # Test with different num_features values for num_features in [2, 3, 4]: - result = FeatureImportance( + result_df, _ = FeatureImportance( self.vm_dataset, self.vm_model, num_features=num_features ) # Check number of columns matches num_features feature_columns = [ - col for col in result.columns if col.startswith("Feature") + col for col in result_df.columns if col.startswith("Feature") ] self.assertEqual(len(feature_columns), num_features) def test_feature_importance_scores(self): - result = FeatureImportance(self.vm_dataset, self.vm_model) + result_df, _ = FeatureImportance(self.vm_dataset, self.vm_model) # Get first feature score - first_feature = result["Feature 1"].iloc[0] + first_feature = result_df["Feature 1"].iloc[0] score = float(first_feature.split(";")[1].strip("[] ")) # Check if score is positive (since we're using absolute importance) diff --git a/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py 
b/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py index cccb55886..979934c1e 100644 --- a/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py +++ b/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py @@ -3,6 +3,7 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm +from validmind import RawData from validmind.tests.model_validation.sklearn.RegressionErrors import RegressionErrors @@ -43,16 +44,15 @@ def setUp(self): __log=False, ) - # Assing predictions to the dataset + # Assign predictions to the dataset self.vm_dataset.assign_predictions(self.vm_model) - def test_returns_dataframe(self): - + def test_returns_dataframe_and_raw_data(self): # Run the function - result = RegressionErrors(self.vm_model, self.vm_dataset) + results, raw_data = RegressionErrors(self.vm_model, self.vm_dataset) - # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + # Check if results is a DataFrame + self.assertIsInstance(results, pd.DataFrame) # Check if DataFrame has expected columns expected_columns = [ @@ -62,25 +62,28 @@ def test_returns_dataframe(self): "Mean Absolute Percentage Error (MAPE)", "Mean Bias Deviation (MBD)", ] - self.assertTrue(all(col in result.columns for col in expected_columns)) + self.assertTrue(all(col in results.columns for col in expected_columns)) # Check if DataFrame has exactly one row - self.assertEqual(len(result), 1) + self.assertEqual(len(results), 1) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) def test_error_metrics_range(self): - result = RegressionErrors(self.vm_model, self.vm_dataset) + results, _ = RegressionErrors(self.vm_model, self.vm_dataset) # All error metrics should be non-negative (except MBD) - self.assertGreaterEqual(result["Mean Absolute Error (MAE)"].iloc[0], 0) - self.assertGreaterEqual(result["Mean Squared Error (MSE)"].iloc[0], 0) - self.assertGreaterEqual(result["Root Mean Squared Error (RMSE)"].iloc[0], 0) + self.assertGreaterEqual(results["Mean Absolute Error (MAE)"].iloc[0], 0) + self.assertGreaterEqual(results["Mean Squared Error (MSE)"].iloc[0], 0) + self.assertGreaterEqual(results["Root Mean Squared Error (RMSE)"].iloc[0], 0) self.assertGreaterEqual( - result["Mean Absolute Percentage Error (MAPE)"].iloc[0], 0 + results["Mean Absolute Percentage Error (MAPE)"].iloc[0], 0 ) # Check if RMSE is square root of MSE - mse = result["Mean Squared Error (MSE)"].iloc[0] - rmse = result["Root Mean Squared Error (RMSE)"].iloc[0] + mse = results["Mean Squared Error (MSE)"].iloc[0] + rmse = results["Root Mean Squared Error (RMSE)"].iloc[0] self.assertAlmostEqual(rmse, np.sqrt(mse), places=5) def test_perfect_prediction(self): @@ -106,28 +109,32 @@ def test_perfect_prediction(self): __log=False, ) - # Assing predictions to the perfect dataset + # Assign predictions to the perfect dataset vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate errors - result = RegressionErrors(vm_perfect_model, vm_perfect_dataset) + results, _ = RegressionErrors(vm_perfect_model, vm_perfect_dataset) # All error metrics should be very close to 0 - self.assertAlmostEqual(result["Mean Absolute Error (MAE)"].iloc[0], 0, places=5) - self.assertAlmostEqual(result["Mean Squared Error (MSE)"].iloc[0], 0, places=5) self.assertAlmostEqual( - result["Root Mean Squared Error (RMSE)"].iloc[0], 0, places=5 + results["Mean Absolute Error (MAE)"].iloc[0], 0, places=5 + ) + self.assertAlmostEqual(results["Mean Squared Error 
(MSE)"].iloc[0], 0, places=5) + self.assertAlmostEqual( + results["Root Mean Squared Error (RMSE)"].iloc[0], 0, places=5 + ) + self.assertAlmostEqual( + results["Mean Bias Deviation (MBD)"].iloc[0], 0, places=5 ) - self.assertAlmostEqual(result["Mean Bias Deviation (MBD)"].iloc[0], 0, places=5) def test_error_metrics_consistency(self): - result = RegressionErrors(self.vm_model, self.vm_dataset) + results, _ = RegressionErrors(self.vm_model, self.vm_dataset) # MSE should be greater than or equal to MAE squared - mae = result["Mean Absolute Error (MAE)"].iloc[0] - mse = result["Mean Squared Error (MSE)"].iloc[0] + mae = results["Mean Absolute Error (MAE)"].iloc[0] + mse = results["Mean Squared Error (MSE)"].iloc[0] self.assertGreaterEqual(mse, mae * mae) # RMSE should be greater than or equal to MAE - rmse = result["Root Mean Squared Error (RMSE)"].iloc[0] + rmse = results["Root Mean Squared Error (RMSE)"].iloc[0] self.assertGreaterEqual(rmse, mae) diff --git a/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py b/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py index d5017db0d..ae936193d 100644 --- a/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py +++ b/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py @@ -4,6 +4,7 @@ from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor import validmind as vm +from validmind import RawData from validmind.tests.model_validation.sklearn.RegressionR2SquareComparison import ( RegressionR2SquareComparison, ) @@ -88,19 +89,22 @@ def setUp(self): def test_returns_dataframe(self): # Run the function - result = RegressionR2SquareComparison( + result_df, raw_data = RegressionR2SquareComparison( [self.vm_dataset1, self.vm_dataset2], [self.vm_model1, self.vm_model2] ) # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + self.assertIsInstance(result_df, pd.DataFrame) # Check if DataFrame has expected columns expected_columns = ["Model", "Dataset", "R-Squared", "Adjusted R-Squared"] - self.assertTrue(all(col in result.columns for col in expected_columns)) + self.assertTrue(all(col in result_df.columns for col in expected_columns)) # Check if DataFrame has correct number of rows (2 datasets * 2 models) - self.assertEqual(len(result), 2) + self.assertEqual(len(result_df), 2) + + # Check raw data + self.assertIsInstance(raw_data, RawData) def test_perfect_prediction(self): # Create a perfect prediction scenario @@ -134,21 +138,23 @@ def test_perfect_prediction(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate R2 scores - result = RegressionR2SquareComparison([vm_perfect_dataset], [vm_perfect_model]) + result_df, raw_data = RegressionR2SquareComparison( + [vm_perfect_dataset], [vm_perfect_model] + ) # Both R2 and Adjusted R2 should be very close to 1 for perfect predictions - self.assertAlmostEqual(result["R-Squared"].iloc[0], 1.0, places=5) - self.assertAlmostEqual(result["Adjusted R-Squared"].iloc[0], 1.0, places=5) + self.assertAlmostEqual(result_df["R-Squared"].iloc[0], 1.0, places=5) + self.assertAlmostEqual(result_df["Adjusted R-Squared"].iloc[0], 1.0, places=5) def test_model_comparison(self): # Compare linear model vs random forest on non-linear dataset - result = RegressionR2SquareComparison( + result_df, raw_data = RegressionR2SquareComparison( [self.vm_dataset2, self.vm_dataset2], [self.vm_model1, self.vm_model2] ) # Get R2 scores for both models - 
linear_r2 = result[result["Model"] == "linear_model"]["R-Squared"].iloc[0] - rf_r2 = result[result["Model"] == "rf_model"]["R-Squared"].iloc[0] + linear_r2 = result_df[result_df["Model"] == "linear_model"]["R-Squared"].iloc[0] + rf_r2 = result_df[result_df["Model"] == "rf_model"]["R-Squared"].iloc[0] # Random Forest should perform better on non-linear data self.assertGreater(rf_r2, linear_r2) @@ -186,8 +192,10 @@ def test_poor_prediction(self): vm_poor_dataset.assign_predictions(vm_poor_model) # Calculate R2 scores - result = RegressionR2SquareComparison([vm_poor_dataset], [vm_poor_model]) + result_df, raw_data = RegressionR2SquareComparison( + [vm_poor_dataset], [vm_poor_model] + ) # R2 scores should be close to 0 for poor predictions - self.assertLess(result["R-Squared"].iloc[0], 0.1) - self.assertLess(result["Adjusted R-Squared"].iloc[0], 0.1) + self.assertLess(result_df["R-Squared"].iloc[0], 0.1) + self.assertLess(result_df["Adjusted R-Squared"].iloc[0], 0.1) diff --git a/tests/unit_tests/model_validation/statsmodels/test_CumulativePredictionProbabilities.py b/tests/unit_tests/model_validation/statsmodels/test_CumulativePredictionProbabilities.py index 5ca2827cc..c77d2b43a 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_CumulativePredictionProbabilities.py +++ b/tests/unit_tests/model_validation/statsmodels/test_CumulativePredictionProbabilities.py @@ -7,6 +7,7 @@ CumulativePredictionProbabilities, ) import plotly.graph_objects as go +from validmind import RawData class TestCumulativePredictionProbabilities(unittest.TestCase): @@ -56,20 +57,25 @@ def setUp(self): # Assign predictions to the dataset self.vm_dataset.assign_predictions(self.vm_model) - def test_returns_figure(self): + def test_returns_figure_and_raw_data(self): # Run the function - result = CumulativePredictionProbabilities(self.vm_dataset, self.vm_model) + fig, raw_data = CumulativePredictionProbabilities( + self.vm_dataset, self.vm_model + ) # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + self.assertIsInstance(fig, go.Figure) # Check if figure has traces - self.assertGreater(len(result.data), 0) + self.assertGreater(len(fig.data), 0) # Check if figure has expected layout elements - self.assertIn("title", result.layout) - self.assertIn("xaxis", result.layout) - self.assertIn("yaxis", result.layout) + self.assertIn("title", fig.layout) + self.assertIn("xaxis", fig.layout) + self.assertIn("yaxis", fig.layout) + + # Check that raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) def test_perfect_separation(self): # Create a dataset with perfect class separation @@ -103,21 +109,25 @@ def test_perfect_separation(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Generate plot - result = CumulativePredictionProbabilities(vm_perfect_dataset, vm_perfect_model) + fig, raw_data = CumulativePredictionProbabilities( + vm_perfect_dataset, vm_perfect_model + ) # Check if there are exactly two traces (one for each class) - self.assertEqual(len(result.data), 2) + self.assertEqual(len(fig.data), 2) # Check trace names contain class labels - trace_names = [trace.name for trace in result.data] + trace_names = [trace.name for trace in fig.data] self.assertTrue(any("0" in name for name in trace_names)) self.assertTrue(any("1" in name for name in trace_names)) def test_probability_range(self): - result = CumulativePredictionProbabilities(self.vm_dataset, self.vm_model) + fig, raw_data = CumulativePredictionProbabilities( + self.vm_dataset, self.vm_model + 
) # Check if probabilities are within [0, 1] range - for trace in result.data: + for trace in fig.data: x_values = trace.x self.assertTrue(all(0 <= x <= 1 for x in x_values)) diff --git a/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py b/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py index cef727007..6554c931a 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py +++ b/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py @@ -3,6 +3,7 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm +from validmind import RawData from validmind.tests.model_validation.statsmodels.DurbinWatsonTest import ( DurbinWatsonTest, ) @@ -61,19 +62,22 @@ def setUp(self): # Assign predictions to the dataset self.vm_dataset.assign_predictions(self.vm_model) - def test_returns_dataframe(self): + def test_returns_dataframe_and_rawdata(self): # Run the function - result = DurbinWatsonTest(self.vm_dataset, self.vm_model) + results, raw_data = DurbinWatsonTest(self.vm_dataset, self.vm_model) - # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + # Check if results is a DataFrame + self.assertIsInstance(results, pd.DataFrame) # Check if DataFrame has expected columns expected_columns = ["dw_statistic", "threshold", "autocorrelation"] - self.assertTrue(all(col in result.columns for col in expected_columns)) + self.assertTrue(all(col in results.columns for col in expected_columns)) # Check if DataFrame has exactly one row - self.assertEqual(len(result), 1) + self.assertEqual(len(results), 1) + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) def test_no_autocorrelation(self): # Create a dataset with no autocorrelation @@ -107,27 +111,30 @@ def test_no_autocorrelation(self): vm_no_auto_dataset.assign_predictions(vm_no_auto_model) # Run the function - result = DurbinWatsonTest(vm_no_auto_dataset, vm_no_auto_model) + results, raw_data = DurbinWatsonTest(vm_no_auto_dataset, vm_no_auto_model) - # Check if result is a DataFrame - self.assertIsInstance(result, pd.DataFrame) + # Check if results is a DataFrame + self.assertIsInstance(results, pd.DataFrame) # Check if DataFrame has expected columns expected_columns = ["dw_statistic", "threshold", "autocorrelation"] - self.assertTrue(all(col in result.columns for col in expected_columns)) + self.assertTrue(all(col in results.columns for col in expected_columns)) # Check if DataFrame has exactly one row - self.assertEqual(len(result), 1) + self.assertEqual(len(results), 1) # For no autocorrelation: # - DW statistic should be close to 2 (typically between 1.5 and 2.5) # - Should be labeled as "No autocorrelation" - dw_stat = result["dw_statistic"].iloc[0] + dw_stat = results["dw_statistic"].iloc[0] self.assertGreater( dw_stat, 1.5, f"DW statistic {dw_stat} indicates positive autocorrelation" ) self.assertLess( dw_stat, 2.5, f"DW statistic {dw_stat} indicates negative autocorrelation" ) - self.assertEqual(result["autocorrelation"].iloc[0], "No autocorrelation") - self.assertEqual(result["threshold"].iloc[0], "[1.5, 2.5]") + self.assertEqual(results["autocorrelation"].iloc[0], "No autocorrelation") + self.assertEqual(results["threshold"].iloc[0], "[1.5, 2.5]") + + # Check if raw_data is an instance of RawData + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/model_validation/statsmodels/test_GINITable.py 
b/tests/unit_tests/model_validation/statsmodels/test_GINITable.py index ae4af20c2..91c3e5980 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_GINITable.py +++ b/tests/unit_tests/model_validation/statsmodels/test_GINITable.py @@ -3,6 +3,7 @@ import numpy as np from sklearn.linear_model import LogisticRegression import validmind as vm +from validmind import RawData from validmind.tests.model_validation.statsmodels.GINITable import GINITable @@ -53,13 +54,16 @@ def setUp(self): # Assign predictions to the dataset self.vm_dataset.assign_predictions(self.vm_model) - def test_returns_dataframe(self): + def test_returns_dataframe_and_rawdata(self): # Run the function - result = GINITable(self.vm_dataset, self.vm_model) + result, raw_data = GINITable(self.vm_dataset, self.vm_model) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) + # Check if raw_data is RawData instance + self.assertIsInstance(raw_data, RawData) + # Check if DataFrame has expected columns expected_columns = ["AUC", "GINI", "KS"] self.assertTrue(all(col in result.columns for col in expected_columns)) @@ -99,7 +103,7 @@ def test_perfect_separation(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate metrics - result = GINITable(vm_perfect_dataset, vm_perfect_model) + result, _ = GINITable(vm_perfect_dataset, vm_perfect_model) # For perfect separation: # - AUC should be 1.0 @@ -143,7 +147,7 @@ def test_random_prediction(self): vm_random_dataset.assign_predictions(vm_random_model) # Calculate metrics - result = GINITable(vm_random_dataset, vm_random_model) + result, _ = GINITable(vm_random_dataset, vm_random_model) # For random predictions: # - AUC should be close to 0.5 @@ -157,7 +161,7 @@ def test_random_prediction(self): def test_metric_ranges(self): # Test regular case - result = GINITable(self.vm_dataset, self.vm_model) + result, _ = GINITable(self.vm_dataset, self.vm_model) # Check metric ranges # AUC should be between 0 and 1 diff --git a/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py b/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py index a85d64d4e..2f87bb1ca 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py +++ b/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py @@ -56,15 +56,16 @@ def setUp(self): # Assign predictions to the dataset self.vm_dataset.assign_predictions(self.vm_model) - def test_returns_figure(self): + def test_returns_figure_and_raw_data(self): # Run the function result = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) - # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + # Check if result contains a Plotly Figure and RawData + self.assertIsInstance(result[0], go.Figure) + self.assertIsInstance(result[1], vm.RawData) # Check if figure has traces - self.assertGreater(len(result.data), 0) + self.assertGreater(len(result[0].data), 0) def test_perfect_separation(self): # Create a dataset with perfect class separation @@ -101,12 +102,12 @@ def test_perfect_separation(self): result = PredictionProbabilitiesHistogram(vm_perfect_dataset, vm_perfect_model) # Check if there are exactly two traces (one for each class) - self.assertEqual(len(result.data), 2) + self.assertEqual(len(result[0].data), 2) def test_probability_ranges(self): result = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) # Check if probabilities are within [0, 
1] range - for trace in result.data: + for trace in result[0].data: x_values = trace.x self.assertTrue(all(0 <= x <= 1 for x in x_values)) diff --git a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py index fc7a344a3..ba85b3366 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py +++ b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py @@ -4,6 +4,7 @@ import statsmodels.api as sm import validmind as vm import plotly.graph_objects as go +from validmind import RawData from validmind.tests.model_validation.statsmodels.RegressionCoeffs import ( RegressionCoeffs, ) @@ -52,67 +53,8 @@ def test_returns_tuple(self): # Check if first element is a Plotly Figure self.assertIsInstance(result[0], go.Figure) - # Check if second element is a DataFrame - self.assertIsInstance(result[1], pd.DataFrame) - - # Check if DataFrame has expected columns - expected_columns = [ - "Feature", - "coef", - "std err", - "t", - "P>|t|", - "[0.025", - "0.975]", - ] - self.assertTrue(all(col in result[1].columns for col in expected_columns)) - - def test_coefficient_values(self): - # Run the function - _, coeffs_df = RegressionCoeffs(self.vm_model) - - # Check if coefficients are close to true values - true_coeffs = {"const": 0.0, "feature1": 2.0, "feature2": -1.5, "feature3": 0.5} - - # First, verify all expected features are present - features = coeffs_df["Feature"].values - for feature in true_coeffs.keys(): - self.assertIn( - feature, features, f"Feature {feature} not found in coefficients" - ) - - # Then check coefficient values - for feature, true_coef in true_coeffs.items(): - mask = coeffs_df["Feature"] == feature - self.assertTrue(any(mask), f"Feature {feature} not found in coefficients") - - estimated_coef = float(coeffs_df.loc[mask, "coef"].iloc[0]) - # Allow for some estimation error - self.assertAlmostEqual( - estimated_coef, - true_coef, - places=1, - msg=f"Coefficient for {feature} differs significantly from true value", - ) - - def test_confidence_intervals(self): - # Run the function - _, coeffs_df = RegressionCoeffs(self.vm_model) - - # Check if confidence intervals are properly calculated - for _, row in coeffs_df.iterrows(): - coef = float(row["coef"]) - lower_ci = float(row["[0.025"]) - upper_ci = float(row["0.975]"]) - - # Check if confidence interval contains coefficient - self.assertLess(lower_ci, coef) - self.assertGreater(upper_ci, coef) - - # Check if confidence interval width is reasonable - ci_width = upper_ci - lower_ci - self.assertGreater(ci_width, 0) # Should be positive - self.assertLess(ci_width, 10) # Should not be too wide + # Check if second element is an instance of RawData + self.assertIsInstance(result[1], RawData) def test_plot_properties(self): # Run the function diff --git a/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py b/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py index d69a52a46..ed6819034 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py +++ b/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py @@ -5,6 +5,7 @@ import plotly.graph_objects as go from validmind.tests.model_validation.statsmodels.ScorecardHistogram import ( ScorecardHistogram, + RawData, ) @@ -49,15 +50,18 @@ def setUp(self): __log=False, ) - def test_returns_figure(self): + def test_returns_figure_and_raw_data(self): # Run the function - result = 
ScorecardHistogram(self.vm_dataset) + result_figure, result_raw_data = ScorecardHistogram(self.vm_dataset) - # Check if result is a Plotly Figure - self.assertIsInstance(result, go.Figure) + # Check if the first part of the result is a Plotly Figure + self.assertIsInstance(result_figure, go.Figure) # Check if figure has traces - self.assertGreater(len(result.data), 0) + self.assertGreater(len(result_figure.data), 0) + + # Check if the second part of the result is RawData + self.assertIsInstance(result_raw_data, RawData) def test_missing_score_column(self): # Create dataset without score column @@ -74,12 +78,12 @@ def test_missing_score_column(self): ScorecardHistogram(vm_dataset_no_score) def test_histogram_properties(self): - result = ScorecardHistogram(self.vm_dataset) + result_figure, _ = ScorecardHistogram(self.vm_dataset) # Should have two traces (one for each class) - self.assertEqual(len(result.data), 2) + self.assertEqual(len(result_figure.data), 2) - for trace in result.data: + for trace in result_figure.data: # Check if trace type is histogram self.assertEqual(trace.type, "histogram") @@ -88,15 +92,14 @@ def test_histogram_properties(self): self.assertTrue(all(300 <= x <= 900 for x in x_values)) def test_class_separation(self): - # Now test the visualization - result = ScorecardHistogram(self.vm_dataset) + result_figure, _ = ScorecardHistogram(self.vm_dataset) # Get scores for each class from the traces class_0_scores = None class_1_scores = None - for trace in result.data: + for trace in result_figure.data: if "target = 0" in trace.name: class_0_scores = np.array(trace.x) elif "target = 1" in trace.name: diff --git a/tests/unit_tests/model_validation/test_BertScore.py b/tests/unit_tests/model_validation/test_BertScore.py index f8d244f62..e77ede712 100644 --- a/tests/unit_tests/model_validation/test_BertScore.py +++ b/tests/unit_tests/model_validation/test_BertScore.py @@ -4,6 +4,7 @@ import validmind as vm import plotly.graph_objects as go from validmind.tests.model_validation.BertScore import BertScore +from validmind import RawData class TestBertScore(unittest.TestCase): @@ -70,14 +71,17 @@ def test_returns_tuple(self): # Check if result is a tuple self.assertIsInstance(result, tuple) - # Should return 7 items: 1 DataFrame and 6 figures (2 for each metric) - self.assertEqual(len(result), 7) + # Should return 8 items: 1 DataFrame, 6 figures (2 for each metric), and 1 RawData object + self.assertEqual(len(result), 8) # Check if first element is a DataFrame self.assertIsInstance(result[0], pd.DataFrame) - # Check if remaining elements are Plotly Figures - for fig in result[1:]: + # Check if raw data object is an instance of RawData + self.assertIsInstance(result[-1], RawData) + + # Check if remaining elements (figures) are Plotly Figures + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) def test_metrics_dataframe(self): @@ -107,7 +111,7 @@ def test_metrics_dataframe(self): self.assertTrue(all(0 <= score <= 1 for score in result_df[col])) def test_figures_properties(self): - _, *figures = BertScore(self.vm_dataset, self.vm_model) + _, *figures, _ = BertScore(self.vm_dataset, self.vm_model) # Check if we have the expected number of figures (2 per metric) self.assertEqual(len(figures), 6) diff --git a/tests/unit_tests/model_validation/test_BleuScore.py b/tests/unit_tests/model_validation/test_BleuScore.py index 320753b3f..4539de7e7 100644 --- a/tests/unit_tests/model_validation/test_BleuScore.py +++ b/tests/unit_tests/model_validation/test_BleuScore.py @@ -3,6 +3,7 @@ 
import numpy as np import validmind as vm import plotly.graph_objects as go +from validmind import RawData from validmind.tests.model_validation.BleuScore import BleuScore @@ -72,9 +73,11 @@ def test_returns_tuple(self): # Check if first element is DataFrame self.assertIsInstance(result[0], pd.DataFrame) - # Check if remaining elements are figures - for fig in result[1:]: + # Check if remaining elements include expected number of figures and RawData + self.assertEqual(len(result), 4) + for fig in result[1:3]: self.assertIsInstance(fig, go.Figure) + self.assertIsInstance(result[3], RawData) def test_metrics_dataframe(self): """Test if metrics DataFrame has expected structure and values.""" @@ -99,7 +102,7 @@ def test_metrics_dataframe(self): def test_figures_properties(self): """Test if figures have expected properties.""" - _, *figures = BleuScore(self.vm_dataset, self.vm_model) + _, *figures, _ = BleuScore(self.vm_dataset, self.vm_model) # Check if we have the expected number of figures (2 figures: histogram and bar chart) self.assertEqual(len(figures), 2) diff --git a/tests/unit_tests/model_validation/test_ContextualRecall.py b/tests/unit_tests/model_validation/test_ContextualRecall.py index 4a1639c1b..c16be99c7 100644 --- a/tests/unit_tests/model_validation/test_ContextualRecall.py +++ b/tests/unit_tests/model_validation/test_ContextualRecall.py @@ -4,6 +4,7 @@ import validmind as vm import plotly.graph_objects as go from validmind.tests.model_validation.ContextualRecall import ContextualRecall +from validmind import RawData class TestContextualRecall(unittest.TestCase): @@ -72,8 +73,11 @@ def test_returns_tuple(self): # Check if first element is DataFrame self.assertIsInstance(result[0], pd.DataFrame) + # Check if raw data is instance of RawData + self.assertIsInstance(result[-1], RawData) + # Check if remaining elements are figures - for fig in result[1:]: + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) def test_metrics_dataframe(self): @@ -99,7 +103,7 @@ def test_metrics_dataframe(self): def test_figures_properties(self): """Test if figures have expected properties.""" - _, *figures = ContextualRecall(self.vm_dataset, self.vm_model) + _, *figures, _ = ContextualRecall(self.vm_dataset, self.vm_model) # Check if we have the expected number of figures self.assertEqual(len(figures), 2) diff --git a/tests/unit_tests/model_validation/test_MeteorScore.py b/tests/unit_tests/model_validation/test_MeteorScore.py index c7d9ea671..c6e962836 100644 --- a/tests/unit_tests/model_validation/test_MeteorScore.py +++ b/tests/unit_tests/model_validation/test_MeteorScore.py @@ -4,6 +4,7 @@ import validmind as vm import plotly.graph_objects as go from validmind.tests.model_validation.MeteorScore import MeteorScore +from validmind import RawData class TestMeteorScore(unittest.TestCase): @@ -72,8 +73,11 @@ def test_returns_tuple(self): # Check if first element is DataFrame self.assertIsInstance(result[0], pd.DataFrame) + # Check if last element is RawData + self.assertIsInstance(result[-1], RawData) + # Check if remaining elements are figures - for fig in result[1:]: + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) def test_metrics_dataframe(self): @@ -99,7 +103,7 @@ def test_metrics_dataframe(self): def test_figures_properties(self): """Test if figures have expected properties.""" - _, *figures = MeteorScore(self.vm_dataset, self.vm_model) + _, *figures, _ = MeteorScore(self.vm_dataset, self.vm_model) # Check if we have the expected number of figures 
self.assertEqual(len(figures), 2) diff --git a/tests/unit_tests/model_validation/test_ModelPredictionResiduals.py b/tests/unit_tests/model_validation/test_ModelPredictionResiduals.py index 5a0c6b10b..ce5c14eab 100644 --- a/tests/unit_tests/model_validation/test_ModelPredictionResiduals.py +++ b/tests/unit_tests/model_validation/test_ModelPredictionResiduals.py @@ -4,7 +4,10 @@ import plotly.graph_objects as go from sklearn.linear_model import LinearRegression import validmind as vm -from validmind.tests.model_validation.ModelPredictionResiduals import ModelPredictionResiduals +from validmind.tests.model_validation.ModelPredictionResiduals import ( + ModelPredictionResiduals, +) +from validmind import RawData class TestModelPredictionResiduals(unittest.TestCase): @@ -12,85 +15,80 @@ def setUp(self): """Set up test fixtures before each test method.""" # Create sample time series data np.random.seed(42) - dates = pd.date_range(start='2023-01-01', periods=100, freq='D') - + dates = pd.date_range(start="2023-01-01", periods=100, freq="D") + # Create predictable pattern with some noise X = np.arange(100).reshape(-1, 1) - y_true = 2 * X.ravel() + np.random.normal(0, 1, 100) # Linear pattern with noise - + y_true = 2 * X.ravel() + np.random.normal( + 0, 1, 100 + ) # Linear pattern with noise + # Create DataFrame - self.df = pd.DataFrame({ - 'feature': X.ravel(), - 'target': y_true - }, index=dates) - + self.df = pd.DataFrame({"feature": X.ravel(), "target": y_true}, index=dates) + # Train linear regression model model = LinearRegression() model.fit(X, y_true) - + # Initialize ValidMind dataset and model self.vm_dataset = vm.init_dataset( input_id="test_dataset", dataset=self.df, - target_column='target', + target_column="target", __log=False, ) - + self.vm_model = vm.init_model( input_id="test_model", model=model, __log=False, ) - + # Link predictions self.vm_dataset.assign_predictions(self.vm_model) def test_return_structure(self): - """Test if function returns expected structure (DataFrame and two figures).""" + """Test if function returns expected structure (DataFrame, two figures, RawData).""" result = ModelPredictionResiduals(self.vm_dataset, self.vm_model) - - # Should return a tuple of (DataFrame, Figure, Figure) - self.assertEqual(len(result), 3) + + # Should return a tuple of (DataFrame, Figure, Figure, RawData) + self.assertEqual(len(result), 4) self.assertIsInstance(result[0], pd.DataFrame) self.assertIsInstance(result[1], go.Figure) self.assertIsInstance(result[2], go.Figure) + self.assertIsInstance(result[3], RawData) def test_summary_dataframe_columns(self): """Test if summary DataFrame contains expected columns.""" summary_df = ModelPredictionResiduals(self.vm_dataset, self.vm_model)[0] - + expected_columns = [ - 'KS Statistic', - 'p-value', - 'KS Normality', - 'p-value Threshold' + "KS Statistic", + "p-value", + "KS Normality", + "p-value Threshold", ] - + self.assertListEqual(list(summary_df.columns), expected_columns) def test_date_filtering(self): """Test if date filtering works correctly.""" - start_date = '2023-02-01' - end_date = '2023-03-01' - + start_date = "2023-02-01" + end_date = "2023-03-01" + result_df = ModelPredictionResiduals( - self.vm_dataset, - self.vm_model, - start_date=start_date, - end_date=end_date + self.vm_dataset, self.vm_model, start_date=start_date, end_date=end_date )[0] - + # Results should still contain all summary statistics - self.assertIn('KS Statistic', result_df.columns) - self.assertIn('p-value', result_df.columns) + self.assertIn("KS 
Statistic", result_df.columns) + self.assertIn("p-value", result_df.columns) def test_p_value_threshold(self): """Test if p_value_threshold affects normality determination.""" custom_threshold = 0.01 summary_df = ModelPredictionResiduals( - self.vm_dataset, - self.vm_model, - p_value_threshold=custom_threshold + self.vm_dataset, self.vm_model, p_value_threshold=custom_threshold )[0] - - self.assertEqual(summary_df['p-value Threshold'].iloc[0], custom_threshold) + + self.assertEqual(summary_df["p-value Threshold"].iloc[0], custom_threshold) diff --git a/tests/unit_tests/model_validation/test_RegardScore.py b/tests/unit_tests/model_validation/test_RegardScore.py index 3a83f9f25..884c6f4d7 100644 --- a/tests/unit_tests/model_validation/test_RegardScore.py +++ b/tests/unit_tests/model_validation/test_RegardScore.py @@ -1,8 +1,8 @@ import unittest import pandas as pd -import numpy as np import plotly.graph_objects as go import validmind as vm +from validmind import RawData from validmind.tests.model_validation.RegardScore import RegardScore @@ -70,9 +70,12 @@ def test_returns_tuple(self): self.assertIsInstance(result[0], pd.DataFrame) # Check if remaining elements are figures - for fig in result[1:]: + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) + # Check if last element is RawData + self.assertIsInstance(result[-1], RawData) + def test_metrics_dataframe(self): """Test if metrics DataFrame has expected structure and values.""" result_df = RegardScore(self.vm_dataset, self.vm_model)[0] @@ -97,7 +100,7 @@ def test_metrics_dataframe(self): def test_figures_properties(self): """Test if figures have expected properties.""" - _, *figures = RegardScore(self.vm_dataset, self.vm_model) + _, *figures, _ = RegardScore(self.vm_dataset, self.vm_model) # Check if we have the expected number of figures (16 figures: histogram and bar chart for different catergories) self.assertEqual(len(figures), 16) diff --git a/tests/unit_tests/model_validation/test_RougeScore.py b/tests/unit_tests/model_validation/test_RougeScore.py index cb054f3c5..e13a259a5 100644 --- a/tests/unit_tests/model_validation/test_RougeScore.py +++ b/tests/unit_tests/model_validation/test_RougeScore.py @@ -1,8 +1,8 @@ import unittest import pandas as pd -import numpy as np import plotly.graph_objects as go import validmind as vm +from validmind import RawData from validmind.tests.model_validation.RougeScore import RougeScore @@ -61,12 +61,14 @@ def setUp(self): def test_returns_dataframe(self): """Test if function returns expected structure.""" - result = RougeScore(self.vm_dataset, self.vm_model) + result_df, *figures, raw_data = RougeScore(self.vm_dataset, self.vm_model) # Check return type - self.assertIsInstance(result, tuple) - self.assertIsInstance(result[0], pd.DataFrame) - + self.assertIsInstance(result_df, pd.DataFrame) + self.assertIsInstance(figures, list) + for fig in figures: + self.assertIsInstance(fig, go.Figure) + self.assertIsInstance(raw_data, RawData) # Check expected columns in DataFrame expected_columns = [ "Metric", @@ -77,11 +79,11 @@ def test_returns_dataframe(self): "Standard Deviation", "Count", ] - self.assertListEqual(list(result[0].columns), expected_columns) + self.assertListEqual(list(result_df.columns), expected_columns) def test_score_ranges(self): """Test if ROUGE scores are within valid range (0 to 1).""" - result_df = RougeScore(self.vm_dataset, self.vm_model)[0] + result_df, *figures, _ = RougeScore(self.vm_dataset, self.vm_model) score_columns = ["Mean Score", "Median Score", "Max Score", 
"Min Score"] for col in score_columns: @@ -89,7 +91,7 @@ def test_score_ranges(self): def test_metrics_present(self): """Test if all expected metrics are present.""" - result_df = RougeScore(self.vm_dataset, self.vm_model)[0] + result_df, *figures, _ = RougeScore(self.vm_dataset, self.vm_model) expected_metrics = ["Precision", "Recall", "F1 Score"] actual_metrics = result_df["Metric"].tolist() @@ -97,7 +99,7 @@ def test_metrics_present(self): def test_figures_properties(self): """Test if figures have expected properties.""" - _, *figures = RougeScore(self.vm_dataset, self.vm_model) + _, *figures, _ = RougeScore(self.vm_dataset, self.vm_model) # Should have 6 figures (histogram and bar chart for each metric) self.assertEqual(len(figures), 6) @@ -134,7 +136,7 @@ def test_identical_texts(self): self.vm_model, prediction_column="predictions" ) - result_df = RougeScore(vm_dataset_identical, self.vm_model)[0] + result_df, *figures, _ = RougeScore(vm_dataset_identical, self.vm_model) # For identical texts, F1 scores should be 1.0 or very close to 1.0 f1_score = result_df[result_df["Metric"] == "F1 Score"]["Mean Score"].iloc[0] @@ -149,8 +151,14 @@ def test_identical_texts(self): def test_custom_metric(self): """Test if custom ROUGE metric parameter works.""" - result = RougeScore(self.vm_dataset, self.vm_model, metric="rouge-2") + result_df, *figures, _ = RougeScore( + self.vm_dataset, self.vm_model, metric="rouge-2" + ) # Should still return DataFrame and figures - self.assertIsInstance(result[0], pd.DataFrame) - self.assertTrue(all(isinstance(fig, go.Figure) for fig in result[1:])) + self.assertIsInstance(result_df, pd.DataFrame) + self.assertTrue(all(isinstance(fig, go.Figure) for fig in figures)) + + # Check raw data instance + raw_data = raw_data + self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/model_validation/test_TimeSeriesPredictionWithCI.py b/tests/unit_tests/model_validation/test_TimeSeriesPredictionWithCI.py index 40b88e6bd..db2416edf 100644 --- a/tests/unit_tests/model_validation/test_TimeSeriesPredictionWithCI.py +++ b/tests/unit_tests/model_validation/test_TimeSeriesPredictionWithCI.py @@ -4,6 +4,7 @@ import plotly.graph_objects as go from datetime import datetime import validmind as vm +from validmind import RawData from validmind.tests.model_validation.TimeSeriesPredictionWithCI import ( TimeSeriesPredictionWithCI, ) @@ -50,22 +51,25 @@ def setUp(self): def test_return_types(self): """Test if function returns expected types.""" - fig, breaches_df = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) + fig, breaches_df, raw_data = TimeSeriesPredictionWithCI( + self.vm_dataset, self.vm_model + ) # Check return types self.assertIsInstance(fig, go.Figure) self.assertIsInstance(breaches_df, pd.DataFrame) + self.assertIsInstance(raw_data, RawData) def test_figure_properties(self): """Test if figure has expected properties.""" - fig, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) + fig, _, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) # Check if figure has exactly four traces (Actual, Predicted, CI Lower, CI Upper) self.assertEqual(len(fig.data), 4) def test_breaches_dataframe(self): """Test if breaches DataFrame has expected structure and values.""" - _, breaches_df = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) + _, breaches_df, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) # Check columns expected_columns = [ @@ -97,7 +101,7 @@ def test_breaches_dataframe(self): def 
test_custom_confidence(self): """Test if custom confidence level works.""" custom_confidence = 0.90 - _, breaches_df = TimeSeriesPredictionWithCI( + _, breaches_df, _ = TimeSeriesPredictionWithCI( self.vm_dataset, self.vm_model, confidence=custom_confidence ) @@ -106,7 +110,7 @@ def test_custom_confidence(self): def test_data_length(self): """Test if the plotted data has correct length.""" - fig, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) + fig, _, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) # All traces should have same length as input data for trace in fig.data: @@ -115,7 +119,7 @@ def test_data_length(self): def test_datetime_index(self): """Test if x-axis uses datetime values.""" - fig, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) + fig, _, _ = TimeSeriesPredictionWithCI(self.vm_dataset, self.vm_model) # Check if x values are datetime objects for all traces for trace in fig.data: diff --git a/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py b/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py index 446c0d154..2f4a4d74e 100644 --- a/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py +++ b/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py @@ -48,21 +48,23 @@ def setUp(self): self.vm_model, prediction_column="predictions" ) - def test_return_type(self): - """Test if function returns a Plotly figure.""" + def test_return_types(self): + """Test if function returns a tuple with a Plotly figure and raw data.""" result = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) - self.assertIsInstance(result, go.Figure) + self.assertIsInstance(result, tuple) + self.assertIsInstance(result[0], go.Figure) + self.assertIsInstance(result[1], vm.RawData) def test_figure_properties(self): """Test if figure has expected properties.""" - fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Check if figure has exactly two traces (Actual and Predicted) self.assertEqual(len(fig.data), 2) def test_data_length(self): """Test if the plotted data has correct length.""" - fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Both traces should have same length as input data self.assertEqual(len(fig.data[0].x), len(self.df)) @@ -72,7 +74,7 @@ def test_data_length(self): def test_datetime_index(self): """Test if x-axis uses datetime values.""" - fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Check if x values are datetime objects self.assertTrue( diff --git a/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py b/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py index 02e3b2f25..edc6c4b07 100644 --- a/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py +++ b/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py @@ -47,15 +47,18 @@ def setUp(self): def test_return_types(self): """Test if function returns expected types.""" - fig, results_df = TimeSeriesR2SquareBySegments(self.vm_dataset, self.vm_model) + fig, results_df, raw_data = TimeSeriesR2SquareBySegments( + self.vm_dataset, self.vm_model + ) # Check return types self.assertIsInstance(fig, go.Figure) self.assertIsInstance(results_df, pd.DataFrame) + self.assertIsInstance(raw_data, vm.RawData) def 
test_results_dataframe(self): """Test if results DataFrame has expected structure.""" - _, results_df = TimeSeriesR2SquareBySegments(self.vm_dataset, self.vm_model) + _, results_df, _ = TimeSeriesR2SquareBySegments(self.vm_dataset, self.vm_model) # Check columns expected_columns = ["Segments", "Start Date", "End Date", "R-Squared"] @@ -79,7 +82,7 @@ def test_custom_segments(self): "end_date": [dates[32], dates[65], dates[-1]], } - _, results_df = TimeSeriesR2SquareBySegments( + _, results_df, _ = TimeSeriesR2SquareBySegments( self.vm_dataset, self.vm_model, segments=custom_segments ) diff --git a/tests/unit_tests/model_validation/test_TokenDisparity.py b/tests/unit_tests/model_validation/test_TokenDisparity.py index 539853e49..6a7bcbcd1 100644 --- a/tests/unit_tests/model_validation/test_TokenDisparity.py +++ b/tests/unit_tests/model_validation/test_TokenDisparity.py @@ -66,8 +66,11 @@ def test_return_types(self): self.assertIsInstance(result, tuple) self.assertIsInstance(result[0], pd.DataFrame) + # Check raw data is an instance of RawData + self.assertIsInstance(result[-1], vm.RawData) + # Check all figures are Plotly figures - for fig in result[1:]: + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) def test_results_dataframe(self): diff --git a/tests/unit_tests/model_validation/test_ToxicityScore.py b/tests/unit_tests/model_validation/test_ToxicityScore.py index 26bdebc6d..ce3c760a7 100644 --- a/tests/unit_tests/model_validation/test_ToxicityScore.py +++ b/tests/unit_tests/model_validation/test_ToxicityScore.py @@ -3,6 +3,7 @@ import plotly.graph_objects as go import validmind as vm from validmind.tests.model_validation.ToxicityScore import ToxicityScore +from validmind import RawData class TestToxicityScore(unittest.TestCase): @@ -70,12 +71,15 @@ def test_return_types(self): # Check return types self.assertIsInstance(result, tuple) self.assertIsInstance(result[0], pd.DataFrame) - self.assertEqual(len(result), 7) # 1 DataFrame + 6 figures + self.assertEqual(len(result), 8) # 1 DataFrame + 6 figures + 1 RawData # Check all figures are Plotly figures - for fig in result[1:]: + for fig in result[1:-1]: self.assertIsInstance(fig, go.Figure) + # Check the last element is RawData + self.assertIsInstance(result[-1], RawData) + def test_results_dataframe(self): """Test if results DataFrame has expected structure.""" result_df = ToxicityScore(self.vm_dataset, self.vm_model)[0] diff --git a/validmind/tests/data_validation/DatasetSplit.py b/validmind/tests/data_validation/DatasetSplit.py index 7910ce046..ee6e2b2c2 100644 --- a/validmind/tests/data_validation/DatasetSplit.py +++ b/validmind/tests/data_validation/DatasetSplit.py @@ -4,8 +4,8 @@ from typing import List -from validmind import tags, tasks -from validmind.vm_models import RawData, VMDataset +from validmind import RawData, tags, tasks +from validmind.vm_models import VMDataset DATASET_LABELS = { "train_ds": "Training", From 9d72f56429df439958a05f4cc4b5319946056b3b Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 15:03:58 -0500 Subject: [PATCH 06/13] chore: reformatting and linting files --- validmind/tests/data_validation/TabularNumericalHistograms.py | 2 +- validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py | 2 +- .../tests/model_validation/statsmodels/ScorecardHistogram.py | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/validmind/tests/data_validation/TabularNumericalHistograms.py b/validmind/tests/data_validation/TabularNumericalHistograms.py index d9c4c9148..438cdfd89 
100644 --- a/validmind/tests/data_validation/TabularNumericalHistograms.py +++ b/validmind/tests/data_validation/TabularNumericalHistograms.py @@ -4,7 +4,7 @@ import plotly.graph_objs as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset diff --git a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py index ff852432f..44cb9d3f4 100644 --- a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py +++ b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import tags, tasks +from validmind import RawData, tags, tasks from validmind.vm_models import VMDataset, VMModel diff --git a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py index 74141cb86..bd4e719fd 100644 --- a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +++ b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py @@ -2,7 +2,6 @@ # See the LICENSE file in the root of this repository for details. # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial -import pandas as pd import plotly.graph_objects as go from matplotlib import cm From bb21b4d1291cb9dc94f314fc3286e275843985b2 Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 15:11:13 -0500 Subject: [PATCH 07/13] fix: fix two failing tests --- .../model_validation/sklearn/test_ROCCurve.py | 19 ++++--------------- .../model_validation/test_RougeScore.py | 9 ++++----- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/tests/unit_tests/model_validation/sklearn/test_ROCCurve.py b/tests/unit_tests/model_validation/sklearn/test_ROCCurve.py index 277913fc3..f370fe5b4 100644 --- a/tests/unit_tests/model_validation/sklearn/test_ROCCurve.py +++ b/tests/unit_tests/model_validation/sklearn/test_ROCCurve.py @@ -70,11 +70,11 @@ def test_roc_curve_structure(self): # Check return type is tuple with RawData and Figure self.assertIsInstance(result, tuple) self.assertEqual(len(result), 2) - self.assertIsInstance(result[0], vm.RawData) - self.assertIsInstance(result[1], go.Figure) + self.assertIsInstance(result[0], go.Figure) + self.assertIsInstance(result[1], vm.RawData) # Get the figure from the tuple - fig = result[1] + fig = result[0] # Check figure has two traces (ROC curve and random baseline) self.assertEqual(len(fig.data), 2) @@ -88,11 +88,6 @@ def test_roc_curve_structure(self): auc = float(fig.data[0].name.split("=")[1].strip().rstrip(")")) self.assertGreater(auc, 0.5) - # Check RawData contains expected fields - self.assertTrue(hasattr(result[0], "fpr")) - self.assertTrue(hasattr(result[0], "tpr")) - self.assertTrue(hasattr(result[0], "auc")) - def test_perfect_separation(self): # Create perfectly separable dataset X = np.random.randn(1000, 2) @@ -143,14 +138,8 @@ def test_perfect_separation(self): vm_train_ds.assign_predictions(vm_perfect_model) vm_test_ds.assign_predictions(vm_perfect_model) - result = ROCCurve(vm_perfect_model, vm_test_ds) - - # Get the figure from the tuple - fig = result[1] + fig, _ = ROCCurve(vm_perfect_model, vm_test_ds) # Check AUC score (should be very close to 1.0) auc = float(fig.data[0].name.split("=")[1].strip().rstrip(")")) self.assertGreater(auc, 0.95) - - # Verify RawData AUC matches figure - self.assertAlmostEqual(result[0].auc, auc, places=2) diff --git 
a/tests/unit_tests/model_validation/test_RougeScore.py b/tests/unit_tests/model_validation/test_RougeScore.py index e13a259a5..51ba71624 100644 --- a/tests/unit_tests/model_validation/test_RougeScore.py +++ b/tests/unit_tests/model_validation/test_RougeScore.py @@ -83,7 +83,7 @@ def test_returns_dataframe(self): def test_score_ranges(self): """Test if ROUGE scores are within valid range (0 to 1).""" - result_df, *figures, _ = RougeScore(self.vm_dataset, self.vm_model) + result_df, *_ = RougeScore(self.vm_dataset, self.vm_model) score_columns = ["Mean Score", "Median Score", "Max Score", "Min Score"] for col in score_columns: @@ -91,7 +91,7 @@ def test_score_ranges(self): def test_metrics_present(self): """Test if all expected metrics are present.""" - result_df, *figures, _ = RougeScore(self.vm_dataset, self.vm_model) + result_df, *_ = RougeScore(self.vm_dataset, self.vm_model) expected_metrics = ["Precision", "Recall", "F1 Score"] actual_metrics = result_df["Metric"].tolist() @@ -136,7 +136,7 @@ def test_identical_texts(self): self.vm_model, prediction_column="predictions" ) - result_df, *figures, _ = RougeScore(vm_dataset_identical, self.vm_model) + result_df, *_ = RougeScore(vm_dataset_identical, self.vm_model) # For identical texts, F1 scores should be 1.0 or very close to 1.0 f1_score = result_df[result_df["Metric"] == "F1 Score"]["Mean Score"].iloc[0] @@ -151,7 +151,7 @@ def test_identical_texts(self): def test_custom_metric(self): """Test if custom ROUGE metric parameter works.""" - result_df, *figures, _ = RougeScore( + result_df, *figures, raw_data = RougeScore( self.vm_dataset, self.vm_model, metric="rouge-2" ) @@ -160,5 +160,4 @@ def test_custom_metric(self): self.assertTrue(all(isinstance(fig, go.Figure) for fig in figures)) # Check raw data instance - raw_data = raw_data self.assertIsInstance(raw_data, RawData) From c412f5d20406785bab20152d37fd36404d90e937 Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 16:31:15 -0500 Subject: [PATCH 08/13] fix: lots of manual fixes to bulk ai updates --- .../IsolationForestOutliers.py | 4 +-- validmind/tests/data_validation/JarqueBera.py | 4 +-- validmind/tests/data_validation/KPSS.py | 6 ++-- validmind/tests/data_validation/LJungBox.py | 6 ++-- .../data_validation/MutualInformation.py | 12 +++---- .../ProtectedClassesDescription.py | 4 +-- .../ProtectedClassesThresholdOptimizer.py | 8 ++--- validmind/tests/data_validation/RunsTest.py | 4 +-- .../tests/data_validation/ScatterPlot.py | 4 +-- .../tests/data_validation/ShapiroWilk.py | 4 +-- validmind/tests/data_validation/Skewness.py | 3 +- validmind/tests/data_validation/SpreadPlot.py | 2 +- .../data_validation/TimeSeriesHistogram.py | 6 ++-- .../data_validation/TimeSeriesLinePlot.py | 4 +-- .../data_validation/TimeSeriesOutliers.py | 9 ++---- .../tests/data_validation/WOEBinTable.py | 2 +- .../tests/data_validation/ZivotAndrewsArch.py | 15 ++------- .../tests/data_validation/nlp/CommonWords.py | 2 +- .../tests/data_validation/nlp/Punctuations.py | 3 +- .../tests/data_validation/nlp/StopWords.py | 8 ++--- validmind/tests/model_validation/BleuScore.py | 2 +- .../tests/model_validation/RougeScore.py | 12 +++---- .../TimeSeriesPredictionsPlot.py | 6 ++-- .../TimeSeriesR2SquareBySegments.py | 4 +-- .../tests/model_validation/ToxicityScore.py | 6 ++-- .../embeddings/EmbeddingsVisualization2D.py | 2 +- .../embeddings/StabilityAnalysisKeyword.py | 4 +-- .../StabilityAnalysisRandomNoise.py | 4 +-- .../embeddings/StabilityAnalysisSynonyms.py | 6 ++-- .../StabilityAnalysisTranslation.py | 
4 +-- .../model_validation/embeddings/utils.py | 12 ++++++- .../sklearn/AdjustedMutualInformation.py | 15 +++++---- .../sklearn/AdjustedRandIndex.py | 16 +++++----- .../ClassifierThresholdOptimization.py | 15 ++------- .../sklearn/ClusterPerformanceMetrics.py | 31 +++++++++---------- .../sklearn/CompletenessScore.py | 21 ++++++------- .../sklearn/FeatureImportance.py | 18 ++--------- .../sklearn/FowlkesMallowsScore.py | 18 +++++------ .../sklearn/HomogeneityScore.py | 20 ++++++------ .../sklearn/HyperParametersTuning.py | 10 ++---- .../sklearn/MinimumROCAUCScore.py | 10 ++---- .../sklearn/OverfitDiagnosis.py | 8 ++--- .../sklearn/RegressionErrors.py | 4 +-- .../sklearn/RegressionPerformance.py | 4 +-- .../sklearn/RegressionR2Square.py | 16 +++++----- .../sklearn/RegressionR2SquareComparison.py | 6 ++-- .../sklearn/SHAPGlobalImportance.py | 2 +- .../sklearn/ScoreProbabilityAlignment.py | 4 +-- .../sklearn/TrainingTestDegradation.py | 4 +-- .../model_validation/sklearn/VMeasure.py | 19 ++++++------ .../sklearn/WeakspotsDiagnosis.py | 7 +---- .../CumulativePredictionProbabilities.py | 13 ++++++-- .../statsmodels/DurbinWatsonTest.py | 6 ++-- .../model_validation/statsmodels/GINITable.py | 23 +++++--------- .../statsmodels/KolmogorovSmirnov.py | 4 +-- .../statsmodels/Lilliefors.py | 6 ++-- .../PredictionProbabilitiesHistogram.py | 4 +-- .../statsmodels/RegressionCoeffs.py | 4 +-- .../RegressionFeatureSignificance.py | 4 +-- .../RegressionModelForecastPlot.py | 12 ++----- .../RegressionModelForecastPlotLevels.py | 2 +- .../RegressionModelSensitivityPlot.py | 5 ++- .../statsmodels/RegressionModelSummary.py | 20 ++++++------ .../statsmodels/ScorecardHistogram.py | 4 +-- .../PredictionAcrossEachFeature.py | 4 +-- validmind/tests/prompt_validation/Bias.py | 14 +++++---- validmind/tests/prompt_validation/Clarity.py | 22 +++++++------ .../prompt_validation/NegativeInstruction.py | 23 ++++++++------ 68 files changed, 256 insertions(+), 334 deletions(-) diff --git a/validmind/tests/data_validation/IsolationForestOutliers.py b/validmind/tests/data_validation/IsolationForestOutliers.py index 1fb95f595..6b6b191fc 100644 --- a/validmind/tests/data_validation/IsolationForestOutliers.py +++ b/validmind/tests/data_validation/IsolationForestOutliers.py @@ -8,7 +8,7 @@ import seaborn as sns from sklearn.ensemble import IsolationForest -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset @@ -93,4 +93,4 @@ def IsolationForestOutliers( plt.close() - return tuple(figures + [RawData(predictions=y_pred)]) + return tuple(figures) diff --git a/validmind/tests/data_validation/JarqueBera.py b/validmind/tests/data_validation/JarqueBera.py index e7f12c902..927474a5b 100644 --- a/validmind/tests/data_validation/JarqueBera.py +++ b/validmind/tests/data_validation/JarqueBera.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.stattools import jarque_bera -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tasks("classification", "regression") @@ -67,4 +67,4 @@ def JarqueBera(dataset): jb_df.reset_index(inplace=True) jb_df.columns = ["column", "stat", "pvalue", "skew", "kurtosis"] - return jb_df, RawData(df_features=df) + return jb_df diff --git a/validmind/tests/data_validation/KPSS.py b/validmind/tests/data_validation/KPSS.py index 75ac336f4..9d3d4985e 100644 --- a/validmind/tests/data_validation/KPSS.py +++ b/validmind/tests/data_validation/KPSS.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.tsa.stattools import 
kpss -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -80,8 +80,6 @@ def KPSS(dataset: VMDataset): if not kpss_table: raise SkipTestError(f"No KPSS results found for dataset: {dataset.input_id}") - kpss_df = pd.DataFrame(kpss_table) - return { "KPSS Test Results": kpss_table, - }, RawData(kpss_results=kpss_df) + } diff --git a/validmind/tests/data_validation/LJungBox.py b/validmind/tests/data_validation/LJungBox.py index d89afbf3b..f746379bb 100644 --- a/validmind/tests/data_validation/LJungBox.py +++ b/validmind/tests/data_validation/LJungBox.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.diagnostic import acorr_ljungbox -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tasks("regression") @@ -52,17 +52,15 @@ def LJungBox(dataset): df = dataset.df ljung_box_values = {} - raw_data = {} for col in df.columns: lb_results = acorr_ljungbox(df[col].values, return_df=True) ljung_box_values[col] = { "stat": lb_results.iloc[0]["lb_stat"], "pvalue": lb_results.iloc[0]["lb_pvalue"], } - raw_data[col] = lb_results ljung_box_df = pd.DataFrame.from_dict(ljung_box_values, orient="index") ljung_box_df.reset_index(inplace=True) ljung_box_df.columns = ["column", "stat", "pvalue"] - return ljung_box_df, RawData(ljung_box_raw=raw_data) + return ljung_box_df diff --git a/validmind/tests/data_validation/MutualInformation.py b/validmind/tests/data_validation/MutualInformation.py index 329c7a400..7c230576f 100644 --- a/validmind/tests/data_validation/MutualInformation.py +++ b/validmind/tests/data_validation/MutualInformation.py @@ -77,12 +77,6 @@ def MutualInformation( else: mi_scores = mutual_info_regression(X, y) - # Create DataFrame for raw data - raw_data = RawData( - features=dataset.feature_columns, - mutual_information_scores=mi_scores.tolist(), - ) - # Create Plotly figure fig = go.Figure() @@ -126,4 +120,8 @@ def MutualInformation( template="plotly_white", ) - return fig, raw_data + return fig, RawData( + mutual_information_scores={ + feature: score for feature, score in zip(sorted_features, sorted_scores) + } + ) diff --git a/validmind/tests/data_validation/ProtectedClassesDescription.py b/validmind/tests/data_validation/ProtectedClassesDescription.py index 038770a93..53f763705 100644 --- a/validmind/tests/data_validation/ProtectedClassesDescription.py +++ b/validmind/tests/data_validation/ProtectedClassesDescription.py @@ -6,7 +6,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.logging import get_logger logger = get_logger(__name__) @@ -127,4 +127,4 @@ def ProtectedClassesDescription(dataset, protected_classes=None): ["Protected Class", "Count"], ascending=[True, False] ) - return (stats_df, *tuple(figures), RawData(grouped_counts=counts)) + return (stats_df, *figures) diff --git a/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py b/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py index 924676da8..4ed285b8f 100644 --- a/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py +++ b/validmind/tests/data_validation/ProtectedClassesThresholdOptimizer.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt import pandas as pd -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import MissingDependencyError 
from validmind.logging import get_logger @@ -103,11 +103,7 @@ def ProtectedClassesThresholdOptimizer( test_df, target, y_pred_opt, protected_classes ) - return ( - {"DPR and EOR Table": fairness_metrics.reset_index()}, - fig, - RawData(threshold_optimizer=threshold_optimizer, y_pred_opt=y_pred_opt), - ) + return {"DPR and EOR Table": fairness_metrics.reset_index()}, fig def initialize_and_fit_optimizer(pipeline, X_train, y_train, protected_classes_df): diff --git a/validmind/tests/data_validation/RunsTest.py b/validmind/tests/data_validation/RunsTest.py index 8eaab7d31..7004b238d 100644 --- a/validmind/tests/data_validation/RunsTest.py +++ b/validmind/tests/data_validation/RunsTest.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.sandbox.stats.runs import runstest_1samp -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tasks("classification", "regression") @@ -69,4 +69,4 @@ def RunsTest(dataset): runs_test_df.reset_index(inplace=True) runs_test_df.columns = ["feature", "stat", "pvalue"] - return runs_test_df, RawData(runs_test_values=runs_test_values) + return runs_test_df diff --git a/validmind/tests/data_validation/ScatterPlot.py b/validmind/tests/data_validation/ScatterPlot.py index 748592592..a7e037475 100644 --- a/validmind/tests/data_validation/ScatterPlot.py +++ b/validmind/tests/data_validation/ScatterPlot.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt import seaborn as sns -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("tabular_data", "visualization") @@ -70,4 +70,4 @@ def ScatterPlot(dataset): plt.close("all") - return fig, RawData(dataset_dataframe=dataset.df) + return fig diff --git a/validmind/tests/data_validation/ShapiroWilk.py b/validmind/tests/data_validation/ShapiroWilk.py index a855ddf96..b41d0cae3 100644 --- a/validmind/tests/data_validation/ShapiroWilk.py +++ b/validmind/tests/data_validation/ShapiroWilk.py @@ -5,7 +5,7 @@ import pandas as pd from scipy import stats -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tasks("classification", "regression") @@ -66,4 +66,4 @@ def ShapiroWilk(dataset): sw_df.reset_index(inplace=True) sw_df.columns = ["column", "stat", "pvalue"] - return sw_df, RawData(statistics=sw_values) + return sw_df diff --git a/validmind/tests/data_validation/Skewness.py b/validmind/tests/data_validation/Skewness.py index df8f6e8fe..c472159fd 100644 --- a/validmind/tests/data_validation/Skewness.py +++ b/validmind/tests/data_validation/Skewness.py @@ -5,7 +5,7 @@ from ydata_profiling.config import Settings from ydata_profiling.model.typeset import ProfilingTypeSet -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("data_quality", "tabular_data") @@ -78,5 +78,4 @@ def Skewness(dataset, max_threshold=1): "Skewness Results for Dataset": results_table, }, passed, - RawData(skewness_values=skewness), ) diff --git a/validmind/tests/data_validation/SpreadPlot.py b/validmind/tests/data_validation/SpreadPlot.py index 0113a0ac0..835fbdf1d 100644 --- a/validmind/tests/data_validation/SpreadPlot.py +++ b/validmind/tests/data_validation/SpreadPlot.py @@ -95,4 +95,4 @@ def SpreadPlot(dataset: VMDataset): name=f"spread_{var1}_{var2}" ) - return tuple(figures) + (RawData(spread_data=spread_data),) + return (*figures, RawData(spread_data=spread_data)) diff --git a/validmind/tests/data_validation/TimeSeriesHistogram.py b/validmind/tests/data_validation/TimeSeriesHistogram.py index ac4de3011..fd38e1eee 100644 --- 
a/validmind/tests/data_validation/TimeSeriesHistogram.py +++ b/validmind/tests/data_validation/TimeSeriesHistogram.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.express as px -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.logging import get_logger logger = get_logger(__name__) @@ -64,7 +64,6 @@ def TimeSeriesHistogram(dataset, nbins=30): raise ValueError("Provided 'columns' must exist in the dataset") figures = [] - data_without_na = {} for col in columns: # Check for missing values and log if any are found missing_count = df[col].isna().sum() @@ -75,7 +74,6 @@ def TimeSeriesHistogram(dataset, nbins=30): # Drop missing values for the current column valid_data = df[~df[col].isna()] - data_without_na[col] = valid_data fig = px.histogram( valid_data, @@ -98,4 +96,4 @@ def TimeSeriesHistogram(dataset, nbins=30): ) figures.append(fig) - return (*figures, RawData(data_without_na=data_without_na)) + return tuple(figures) diff --git a/validmind/tests/data_validation/TimeSeriesLinePlot.py b/validmind/tests/data_validation/TimeSeriesLinePlot.py index 3f99af57c..4df6f1472 100644 --- a/validmind/tests/data_validation/TimeSeriesLinePlot.py +++ b/validmind/tests/data_validation/TimeSeriesLinePlot.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -78,4 +78,4 @@ def TimeSeriesLinePlot(dataset: VMDataset): figures.append(fig) - return (*figures, RawData(time_series_data=df[dataset.feature_columns_numeric])) + return tuple(figures) diff --git a/validmind/tests/data_validation/TimeSeriesOutliers.py b/validmind/tests/data_validation/TimeSeriesOutliers.py index 88a2fa2a6..cc6566b95 100644 --- a/validmind/tests/data_validation/TimeSeriesOutliers.py +++ b/validmind/tests/data_validation/TimeSeriesOutliers.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import SkipTestError from validmind.vm_models import VMDataset @@ -111,9 +111,4 @@ def TimeSeriesOutliers(dataset: VMDataset, zscore_threshold: int = 3): figures.append(fig) - return ( - outlier_df.sort_values(["Column", "Date"]), - figures, - len(outlier_df) == 0, - RawData(z_scores=z_scores), - ) + return (outlier_df.sort_values(["Column", "Date"]), figures, len(outlier_df) == 0) diff --git a/validmind/tests/data_validation/WOEBinTable.py b/validmind/tests/data_validation/WOEBinTable.py index 3c8e82172..94e05db0d 100644 --- a/validmind/tests/data_validation/WOEBinTable.py +++ b/validmind/tests/data_validation/WOEBinTable.py @@ -71,4 +71,4 @@ def WOEBinTable(dataset: VMDataset, breaks_adj: list = None): return { "Weight of Evidence (WoE) and Information Value (IV)": result_table - }, RawData(bins=bins) + }, RawData(woe_bins=bins) diff --git a/validmind/tests/data_validation/ZivotAndrewsArch.py b/validmind/tests/data_validation/ZivotAndrewsArch.py index 35f30aee6..ec2b560c6 100644 --- a/validmind/tests/data_validation/ZivotAndrewsArch.py +++ b/validmind/tests/data_validation/ZivotAndrewsArch.py @@ -6,7 +6,7 @@ from arch.unitroot import ZivotAndrews from numpy.linalg import LinAlgError -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import SkipTestError from validmind.logging import get_logger from validmind.vm_models import VMDataset @@ -65,7 +65,6 
@@ def ZivotAndrewsArch(dataset: VMDataset): df = df.apply(pd.to_numeric, errors="coerce") za_values = [] - raw_data = {} for col in df.columns: try: @@ -84,14 +83,4 @@ def ZivotAndrewsArch(dataset: VMDataset): } ) - # Store intermediate raw data for each column - raw_data[col] = { - "stat": za.stat, - "pvalue": za.pvalue, - "usedlag": za.lags, - "nobs": za.nobs, - } - - return {"Zivot-Andrews Test Results": za_values}, RawData( - zivot_andrews_results=raw_data - ) + return {"Zivot-Andrews Test Results": za_values} diff --git a/validmind/tests/data_validation/nlp/CommonWords.py b/validmind/tests/data_validation/nlp/CommonWords.py index 617ce7c3a..21f21bd40 100644 --- a/validmind/tests/data_validation/nlp/CommonWords.py +++ b/validmind/tests/data_validation/nlp/CommonWords.py @@ -94,4 +94,4 @@ def create_corpus(df, text_column): xaxis_tickangle=-45, ) - return fig, RawData(word_frequencies=dict(most)) + return fig, RawData(words=x, frequencies=y) diff --git a/validmind/tests/data_validation/nlp/Punctuations.py b/validmind/tests/data_validation/nlp/Punctuations.py index ea2d5fed6..f846aa74c 100644 --- a/validmind/tests/data_validation/nlp/Punctuations.py +++ b/validmind/tests/data_validation/nlp/Punctuations.py @@ -64,7 +64,8 @@ def Punctuations(dataset, count_mode="token"): corpus = _create_corpus(dataset.df, dataset.text_column) punctuation_counts = _count_punctuations(corpus, count_mode) fig = _create_punctuation_plot(punctuation_counts) - return fig, RawData(punctuation_counts=dict(punctuation_counts)) + + return fig, RawData(punctuation_counts=punctuation_counts) def _create_punctuation_plot(punctuation_counts): diff --git a/validmind/tests/data_validation/nlp/StopWords.py b/validmind/tests/data_validation/nlp/StopWords.py index 2d9eb11ce..ac3236294 100644 --- a/validmind/tests/data_validation/nlp/StopWords.py +++ b/validmind/tests/data_validation/nlp/StopWords.py @@ -84,17 +84,17 @@ def create_corpus(df, text_column): nltk.download("stopwords", quiet=True) stop = set(stopwords.words("english")) - dic = defaultdict(int) + stop_word_frequencies = defaultdict(int) for word in corpus: if word in stop: - dic[word] += 1 + stop_word_frequencies[word] += 1 # Calculate the total number of words in the corpus total_words = len(corpus) # Calculate the percentage of each word in the corpus word_percentages = {} - for word, count in dic.items(): + for word, count in stop_word_frequencies.items(): percentage = (count / total_words) * 100 word_percentages[word] = percentage @@ -124,5 +124,5 @@ def create_corpus(df, text_column): }, fig, passed, - RawData(stop_word_frequencies=dic, total_words=total_words), + RawData(stop_word_frequencies=stop_word_frequencies, total_words=total_words), ) diff --git a/validmind/tests/model_validation/BleuScore.py b/validmind/tests/model_validation/BleuScore.py index 9a560a506..b524d3227 100644 --- a/validmind/tests/model_validation/BleuScore.py +++ b/validmind/tests/model_validation/BleuScore.py @@ -114,4 +114,4 @@ def BleuScore(dataset, model): # Create a DataFrame from all collected statistics result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - return (result_df, *figures, RawData(bleu_scores=metrics_df)) + return (result_df, *figures, RawData(bleu_scores_df=metrics_df)) diff --git a/validmind/tests/model_validation/RougeScore.py b/validmind/tests/model_validation/RougeScore.py index eccc38e9d..7d8f9a106 100644 --- a/validmind/tests/model_validation/RougeScore.py +++ b/validmind/tests/model_validation/RougeScore.py @@ -118,10 
+118,8 @@ def RougeScore(dataset, model, metric="rouge-1"): {"p": "Precision", "r": "Recall", "f": "F1 Score"} ) - # Create a DataFrame from all collected statistics - result_df = pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}) - - # Include raw data - raw_data = RawData(score_list=score_list) - - return (result_df, *figures, raw_data) + return ( + pd.DataFrame(stats_df).reset_index().rename(columns={"index": "Metric"}), + *figures, + RawData(rouge_scores_df=df_scores), + ) diff --git a/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py b/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py index 118cff2d7..6d2c84134 100644 --- a/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py +++ b/validmind/tests/model_validation/TimeSeriesPredictionsPlot.py @@ -4,7 +4,7 @@ import plotly.graph_objects as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("model_predictions", "visualization") @@ -70,6 +70,4 @@ def TimeSeriesPredictionsPlot(dataset, model): template="plotly_white", ) - return fig, RawData( - actual_values=dataset.y, predicted_values=y_pred, time_index=time_index - ) + return fig diff --git a/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py b/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py index e13b4f572..79d644e5f 100644 --- a/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py +++ b/validmind/tests/model_validation/TimeSeriesR2SquareBySegments.py @@ -7,7 +7,7 @@ import plotly.express as px from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("model_performance", "sklearn") @@ -105,4 +105,4 @@ def TimeSeriesR2SquareBySegments(dataset, model, segments=None): }, ) - return fig, results_df, RawData(segments=segments) + return fig, results_df diff --git a/validmind/tests/model_validation/ToxicityScore.py b/validmind/tests/model_validation/ToxicityScore.py index 9ba8a9621..28f641046 100644 --- a/validmind/tests/model_validation/ToxicityScore.py +++ b/validmind/tests/model_validation/ToxicityScore.py @@ -143,8 +143,8 @@ def calculate_stats(df): result_df, *tuple(figures), RawData( - input_toxicity=input_toxicity, - true_toxicity=true_toxicity, - pred_toxicity=pred_toxicity, + input_toxicity_df=input_df, + true_toxicity_df=true_df, + pred_toxicity_df=pred_df, ), ) diff --git a/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py b/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py index fc599f9cb..543b96d24 100644 --- a/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py +++ b/validmind/tests/model_validation/embeddings/EmbeddingsVisualization2D.py @@ -89,4 +89,4 @@ def EmbeddingsVisualization2D( fig = px.scatter(**scatter_kwargs) fig.update_layout(width=500, height=500) - return fig, RawData(reduced_embeddings=reduced_embeddings) + return fig, RawData(tsne_embeddings=reduced_embeddings) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py index 86356b3de..d3d0785ae 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisKeyword.py @@ -91,10 +91,10 @@ def perturb_data(data: str): perturb_data ) - results = create_stability_analysis_result( + raw_data, results = create_stability_analysis_result( dataset.y_pred(model), 
model.predict(perturbed_df), mean_similarity_threshold, ) - return results, RawData(perturbed_data=perturbed_df) + return results, RawData(original_perturbed_similarity=raw_data) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py index e845a4fb7..474dcb889 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisRandomNoise.py @@ -145,10 +145,10 @@ def perturb_data(data): perturb_data ) - result = create_stability_analysis_result( + raw_data, result = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, ) - return result, RawData(perturbed_text_data=perturbed_df) + return result, RawData(original_perturbed_similarity=raw_data) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py index fbdbd7d0e..02cc0118c 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisSynonyms.py @@ -101,8 +101,10 @@ def perturb_data(data): perturb_data ) - return create_stability_analysis_result( + raw_data, result = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, - ), RawData(original_data=original_df, perturbed_data=perturbed_df) + ) + + return result, RawData(original_perturbed_similarity=raw_data) diff --git a/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py b/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py index bb0697018..f8634c93a 100644 --- a/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py +++ b/validmind/tests/model_validation/embeddings/StabilityAnalysisTranslation.py @@ -128,10 +128,10 @@ def perturb_data(data): perturb_data ) - result = create_stability_analysis_result( + raw_data, result = create_stability_analysis_result( dataset.y_pred(model), model.predict(perturbed_df), mean_similarity_threshold, ) - return result, RawData(original_data=original_df, perturbed_data=perturbed_df) + return result, RawData(original_perturbed_similarity=raw_data) diff --git a/validmind/tests/model_validation/embeddings/utils.py b/validmind/tests/model_validation/embeddings/utils.py index 0be1b0fa8..727d68dc7 100644 --- a/validmind/tests/model_validation/embeddings/utils.py +++ b/validmind/tests/model_validation/embeddings/utils.py @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial import numpy as np +import pandas as pd import plotly.express as px from sklearn.metrics.pairwise import cosine_similarity @@ -17,10 +18,19 @@ def create_stability_analysis_result( original_embeddings, perturbed_embeddings ).diagonal() + # create a raw dataframe of the original, perturbed and similarity + raw_data = pd.DataFrame( + { + "original": original_embeddings, + "perturbed": perturbed_embeddings, + "similarity": similarities, + } + ) + mean = np.mean(similarities) passed = mean > mean_similarity_threshold - return ( + return raw_data, ( [ { "Mean Similarity": mean, diff --git a/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py b/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py index 83edafd95..71fa45394 100644 --- 
a/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py +++ b/validmind/tests/model_validation/sklearn/AdjustedMutualInformation.py @@ -4,7 +4,7 @@ from sklearn.metrics import adjusted_mutual_info_score -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,8 +52,11 @@ def AdjustedMutualInformation(model: VMModel, dataset: VMDataset): - The interpretability of the score can be complex as it depends on the understanding of information theory concepts. """ - ami_score = adjusted_mutual_info_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - return [{"Adjusted Mutual Information": ami_score}], RawData(ami_score=ami_score) + return [ + { + "Adjusted Mutual Information": adjusted_mutual_info_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py b/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py index 96138287f..6fd3cb181 100644 --- a/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py +++ b/validmind/tests/model_validation/sklearn/AdjustedRandIndex.py @@ -4,7 +4,7 @@ from sklearn.metrics import adjusted_rand_score -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -49,9 +49,11 @@ def AdjustedRandIndex(model: VMModel, dataset: VMDataset): - It may be difficult to interpret the implications of an ARI score without context or a benchmark, as it is heavily dependent on the characteristics of the dataset used. """ - ari_score = adjusted_rand_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - - return [{"Adjusted Rand Index": ari_score}], RawData(ari_score=ari_score) + return [ + { + "Adjusted Rand Index": adjusted_rand_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py index 30abe7796..0a4d4f442 100644 --- a/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py +++ b/validmind/tests/model_validation/sklearn/ClassifierThresholdOptimization.py @@ -8,7 +8,7 @@ from plotly.subplots import make_subplots from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -255,15 +255,4 @@ def ClassifierThresholdOptimization( # Create results table and sort by threshold descending table = pd.DataFrame(results).sort_values("threshold", ascending=False) - return ( - fig, - table, - RawData( - roc_data={"fpr": fpr, "tpr": tpr, "thresholds_roc": thresholds_roc}, - pr_data={ - "precision": precision, - "recall": recall, - "thresholds_pr": thresholds_pr, - }, - ), - ) + return fig, table diff --git a/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py b/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py index 40f5fe57b..e3d23ffe3 100644 --- a/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py +++ b/validmind/tests/model_validation/sklearn/ClusterPerformanceMetrics.py @@ -11,7 +11,7 @@ v_measure_score, ) -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel HOMOGENEITY = """ @@ 
-115,56 +115,53 @@ def ClusterPerformanceMetrics(model: VMModel, dataset: VMDataset): - Does not consider aspects like computational efficiency of the model or its capability to handle high dimensional data. """ - labels_true = dataset.y - labels_pred = dataset.y_pred(model) - return [ { "Metric": "Homogeneity Score", "Description": HOMOGENEITY, "Value": homogeneity_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, { "Metric": "Completeness Score", "Description": COMPLETENESS, "Value": completeness_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, { "Metric": "V Measure", "Description": V_MEASURE, "Value": v_measure_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, { "Metric": "Adjusted Rand Index", "Description": ADJUSTED_RAND_INDEX, "Value": adjusted_rand_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, { "Metric": "Adjusted Mutual Information", "Description": ADJUSTED_MUTUAL_INFORMATION, "Value": adjusted_mutual_info_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, { "Metric": "Fowlkes-Mallows score", "Description": FOULKES_MALLOWS_SCORE, "Value": fowlkes_mallows_score( - labels_true=labels_true, - labels_pred=labels_pred, + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), ), }, - ], RawData(labels_true=labels_true, labels_pred=labels_pred) + ] diff --git a/validmind/tests/model_validation/sklearn/CompletenessScore.py b/validmind/tests/model_validation/sklearn/CompletenessScore.py index de5b916cf..26272822e 100644 --- a/validmind/tests/model_validation/sklearn/CompletenessScore.py +++ b/validmind/tests/model_validation/sklearn/CompletenessScore.py @@ -4,7 +4,7 @@ from sklearn.metrics import completeness_score -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -47,14 +47,11 @@ def CompletenessScore(model: VMModel, dataset: VMDataset): - The Completeness Score only applies to clustering models; it cannot be used for other types of machine learning models. """ - labels_true = dataset.y - labels_pred = dataset.y_pred(model) - - completeness = completeness_score( - labels_true=labels_true, - labels_pred=labels_pred, - ) - - return [{"Completeness Score": completeness}], RawData( - labels_true=labels_true, labels_pred=labels_pred - ) + return [ + { + "Completeness Score": completeness_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/FeatureImportance.py b/validmind/tests/model_validation/sklearn/FeatureImportance.py index b73da1c00..61bcb4a01 100644 --- a/validmind/tests/model_validation/sklearn/FeatureImportance.py +++ b/validmind/tests/model_validation/sklearn/FeatureImportance.py @@ -5,7 +5,7 @@ import pandas as pd from sklearn.inspection import permutation_importance -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,8 +52,6 @@ def FeatureImportance(dataset: VMDataset, model: VMModel, num_features: int = 3) - The function's output is dependent on the number of features specified by `num_features`, which defaults to 3 but can be adjusted. 
""" - results_list = [] - pfi_values = permutation_importance( estimator=model.model, X=dataset.x_df(), @@ -61,8 +59,6 @@ def FeatureImportance(dataset: VMDataset, model: VMModel, num_features: int = 3) random_state=0, n_jobs=-2, ) - - # Create a dictionary to store PFI scores pfi = { column: pfi_values["importances_mean"][i] for i, column in enumerate(dataset.feature_columns) @@ -70,14 +66,10 @@ def FeatureImportance(dataset: VMDataset, model: VMModel, num_features: int = 3) # Sort features by their importance sorted_features = sorted(pfi.items(), key=lambda item: item[1], reverse=True) - - # Extract the top `num_features` features top_features = sorted_features[:num_features] - # Prepare the result for the current model and dataset result = {} - # Dynamically add feature columns to the result for i in range(num_features): if i < len(top_features): result[ @@ -86,10 +78,4 @@ def FeatureImportance(dataset: VMDataset, model: VMModel, num_features: int = 3) else: result[f"Feature {i + 1}"] = None - # Append the result to the list - results_list.append(result) - - # Convert the results list to a DataFrame - results_df = pd.DataFrame(results_list) - - return results_df, RawData(permutation_importance_scores=pfi_values) + return pd.DataFrame([result]) diff --git a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py index 44cb9d3f4..04b9b08b3 100644 --- a/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py +++ b/validmind/tests/model_validation/sklearn/FowlkesMallowsScore.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -52,11 +52,11 @@ def FowlkesMallowsScore(dataset: VMDataset, model: VMModel): - It does not handle mismatching numbers of clusters between the true and predicted labels. As such, it may return misleading results if the predicted labels suggest a different number of clusters than what is in the true labels. """ - score = metrics.fowlkes_mallows_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - - return [{"Fowlkes-Mallows score": score}], RawData( - true_labels=dataset.y, predicted_labels=dataset.y_pred(model) - ) + return [ + { + "Fowlkes-Mallows score": metrics.fowlkes_mallows_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/HomogeneityScore.py b/validmind/tests/model_validation/sklearn/HomogeneityScore.py index fd595ac07..1fedd83bd 100644 --- a/validmind/tests/model_validation/sklearn/HomogeneityScore.py +++ b/validmind/tests/model_validation/sklearn/HomogeneityScore.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -50,13 +50,11 @@ class labels of the training and testing sets with the labels predicted by the g - The score does not address the actual number of clusters formed, or the evenness of cluster sizes. It only checks the homogeneity within the given clusters created by the model. 
""" - - homogeneity_score = metrics.homogeneity_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - - return ( - [{"Homogeneity Score": homogeneity_score}], - RawData(labels_true=dataset.y, labels_pred=dataset.y_pred(model)), - ) + return [ + { + "Homogeneity Score": metrics.homogeneity_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py index 4e8cc34d4..dd90a44af 100644 --- a/validmind/tests/model_validation/sklearn/HyperParametersTuning.py +++ b/validmind/tests/model_validation/sklearn/HyperParametersTuning.py @@ -7,7 +7,7 @@ from sklearn.metrics import make_scorer, recall_score from sklearn.model_selection import GridSearchCV -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -135,8 +135,6 @@ def HyperParametersTuning( metrics = _get_metrics(scoring) thresholds = _get_thresholds(thresholds) - raw_data = {} - for threshold in thresholds: scoring_dict = _create_scoring_dict(scoring, metrics, threshold) @@ -164,8 +162,4 @@ def HyperParametersTuning( results.append(row_result) - # Store intermediate data for each (optimize_for, threshold) combination - raw_data_key = f"{optimize_for}_threshold_{threshold}" - raw_data[raw_data_key] = estimators.cv_results_ - - return results, RawData(grid_search_results=raw_data) + return results diff --git a/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py b/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py index f4a00c55c..1a754f5e6 100644 --- a/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py +++ b/validmind/tests/model_validation/sklearn/MinimumROCAUCScore.py @@ -6,8 +6,7 @@ from sklearn.metrics import roc_auc_score from sklearn.preprocessing import LabelBinarizer -from validmind import RawData -from validmind.tests import tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -76,13 +75,10 @@ def MinimumROCAUCScore(dataset: VMDataset, model: VMModel, min_threshold: float y_score_prob = dataset.y_prob(model) roc_auc = roc_auc_score(y_true=y_true, y_score=y_score_prob) - table = [ + return [ { "Score": roc_auc, "Threshold": min_threshold, "Pass/Fail": "Pass" if roc_auc > min_threshold else "Fail", } - ] - pass_fail = roc_auc > min_threshold - - return table, pass_fail, RawData(y_true=y_true, roc_auc=roc_auc) + ], roc_auc > min_threshold diff --git a/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py b/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py index 96506801d..28c802e09 100644 --- a/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py +++ b/validmind/tests/model_validation/sklearn/OverfitDiagnosis.py @@ -10,7 +10,7 @@ import seaborn as sns from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -299,8 +299,4 @@ def OverfitDiagnosis( } ) - return ( - {"Overfit Diagnosis": test_results}, - *figures, - RawData(train_metrics=results_train, test_metrics=results_test), - ) + return ({"Overfit Diagnosis": test_results}, *figures) diff --git a/validmind/tests/model_validation/sklearn/RegressionErrors.py b/validmind/tests/model_validation/sklearn/RegressionErrors.py index a819f1737..f398d6d21 100644 --- 
a/validmind/tests/model_validation/sklearn/RegressionErrors.py +++ b/validmind/tests/model_validation/sklearn/RegressionErrors.py @@ -6,7 +6,7 @@ import pandas as pd from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("sklearn", "model_performance") @@ -83,4 +83,4 @@ def _regression_errors(y_true, y_pred): } ) - return results_df, RawData(y_true=y_true, y_pred=y_pred) + return results_df diff --git a/validmind/tests/model_validation/sklearn/RegressionPerformance.py b/validmind/tests/model_validation/sklearn/RegressionPerformance.py index d389ce96d..2ffdbd6cf 100644 --- a/validmind/tests/model_validation/sklearn/RegressionPerformance.py +++ b/validmind/tests/model_validation/sklearn/RegressionPerformance.py @@ -5,7 +5,7 @@ import numpy as np from sklearn.metrics import mean_absolute_error, mean_squared_error -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -80,4 +80,4 @@ def RegressionPerformance(model: VMModel, dataset: VMDataset): "Value": value, } for metric, value in metrics.items() - ], RawData(y_true=y_true, y_pred=y_pred) + ] diff --git a/validmind/tests/model_validation/sklearn/RegressionR2Square.py b/validmind/tests/model_validation/sklearn/RegressionR2Square.py index 8b8d58a98..6f6dc6bfc 100644 --- a/validmind/tests/model_validation/sklearn/RegressionR2Square.py +++ b/validmind/tests/model_validation/sklearn/RegressionR2Square.py @@ -51,17 +51,15 @@ def RegressionR2Square(dataset, model): violated. - Does not provide insight on whether the correct regression model was used or if key assumptions have been met. """ - y_true = dataset.y y_pred = dataset.y_pred(model) y_true = y_true.astype(y_pred.dtype) - r2s = metrics.r2_score(y_true, y_pred) - adj_r2 = adj_r2_score(y_true, y_pred, len(y_true), len(dataset.feature_columns)) - - # Create dataframe with R2 and Adjusted R2 in one row - results_df = pd.DataFrame( - {"R-squared (R2) Score": [r2s], "Adjusted R-squared (R2) Score": [adj_r2]} + return pd.DataFrame( + { + "R-squared (R2) Score": [metrics.r2_score(y_true, y_pred)], + "Adjusted R-squared (R2) Score": [ + adj_r2_score(y_true, y_pred, len(y_true), len(dataset.feature_columns)) + ], + } ) - - return results_df diff --git a/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py b/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py index f5990dbff..5245bd8e2 100644 --- a/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py +++ b/validmind/tests/model_validation/sklearn/RegressionR2SquareComparison.py @@ -5,7 +5,7 @@ import pandas as pd from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.tests.model_validation.statsmodels.statsutils import adj_r2_score @@ -77,6 +77,4 @@ def RegressionR2SquareComparison(datasets, models): } ) - # Convert results list to a DataFrame - results_df = pd.DataFrame(results_list) - return results_df, RawData(r2_values=results_df) + return pd.DataFrame(results_list) diff --git a/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py b/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py index b4a36635f..a7e162b83 100644 --- a/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py +++ b/validmind/tests/model_validation/sklearn/SHAPGlobalImportance.py @@ -229,5 +229,5 @@ def SHAPGlobalImportance( return ( 
generate_shap_plot("mean", shap_values, shap_sample), generate_shap_plot("summary", shap_values, shap_sample), - RawData(shap_values=shap_values), + RawData(shap_values=shap_values, shap_sample=shap_sample), ) diff --git a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py index 28e86f900..7246ca573 100644 --- a/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py +++ b/validmind/tests/model_validation/sklearn/ScoreProbabilityAlignment.py @@ -5,7 +5,7 @@ import pandas as pd import plotly.graph_objects as go -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -128,4 +128,4 @@ def ScoreProbabilityAlignment( height=600, ) - return results_df, fig, RawData(score_bin_data=df) + return results_df, fig diff --git a/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py b/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py index 61b6313a3..1e023ea09 100644 --- a/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py +++ b/validmind/tests/model_validation/sklearn/TrainingTestDegradation.py @@ -103,7 +103,7 @@ def TrainingTestDegradation( table, all(row["Pass/Fail"] == "Pass" for row in table), RawData( - train_classification_report=ds1_report, - test_classification_report=ds2_report, + dataset_1_report=ds1_report, + dataset_2_report=ds2_report, ), ) diff --git a/validmind/tests/model_validation/sklearn/VMeasure.py b/validmind/tests/model_validation/sklearn/VMeasure.py index 79cb569ee..2a86c7390 100644 --- a/validmind/tests/model_validation/sklearn/VMeasure.py +++ b/validmind/tests/model_validation/sklearn/VMeasure.py @@ -4,7 +4,7 @@ from sklearn import metrics -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @@ -48,12 +48,11 @@ def VMeasure(dataset: VMDataset, model: VMModel): the other. The V Measure Score does not provide flexibility in assigning different weights to homogeneity and completeness. 
""" - v_measure = metrics.v_measure_score( - labels_true=dataset.y, - labels_pred=dataset.y_pred(model), - ) - - # Store raw data needed to recalculate V Measure - raw_data = RawData(labels_true=dataset.y, labels_pred=dataset.y_pred(model)) - - return [{"V Measure": v_measure}], raw_data + return [ + { + "V Measure": metrics.v_measure_score( + labels_true=dataset.y, + labels_pred=dataset.y_pred(model), + ) + } + ] diff --git a/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py b/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py index 63838adec..468b82bb2 100644 --- a/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py +++ b/validmind/tests/model_validation/sklearn/WeakspotsDiagnosis.py @@ -9,8 +9,7 @@ import seaborn as sns from sklearn import metrics -from validmind import RawData -from validmind.tests import tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel DEFAULT_METRICS = { @@ -295,8 +294,4 @@ def WeakspotsDiagnosis( ).sort_values(["Feature", "Dataset"]), *figures, passed, - RawData( - metrics_results_training=pd.DataFrame(results_1), - metrics_results_test=pd.DataFrame(results_2), - ), ) diff --git a/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py b/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py index 0ad422278..4f069acc1 100644 --- a/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py +++ b/validmind/tests/model_validation/statsmodels/CumulativePredictionProbabilities.py @@ -62,9 +62,9 @@ def CumulativePredictionProbabilities(dataset, model, title="Cumulative Probabil df = dataset.df df["probabilities"] = dataset.y_prob(model) - fig = _plot_cumulative_prob(df, dataset.target_column, title) + fig, fig_data = _plot_cumulative_prob(df, dataset.target_column, title) - return fig, RawData(probabilities_df=df) + return fig, RawData(cumulative_probabilities=fig_data) def _plot_cumulative_prob(df, target_col, title): @@ -82,10 +82,17 @@ def _plot_cumulative_prob(df, target_col, title): cls: f"rgb({int(rgb[0]*255)}, {int(rgb[1]*255)}, {int(rgb[2]*255)})" for cls, rgb in zip(classes, colors) } + + raw_data = {} + for class_value in sorted(df[target_col].unique()): # Calculate cumulative distribution for the current class sorted_probs = np.sort(df[df[target_col] == class_value]["probabilities"]) cumulative_probs = np.cumsum(sorted_probs) / np.sum(sorted_probs) + raw_data[class_value] = { + "sorted_probs": sorted_probs, + "cumulative_probs": cumulative_probs, + } fig.add_trace( go.Scatter( @@ -104,4 +111,4 @@ def _plot_cumulative_prob(df, target_col, title): yaxis_title="Cumulative Distribution", ) - return fig + return fig, raw_data diff --git a/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py b/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py index a252aa336..759e15553 100644 --- a/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py +++ b/validmind/tests/model_validation/statsmodels/DurbinWatsonTest.py @@ -5,7 +5,7 @@ import pandas as pd from statsmodels.stats.stattools import durbin_watson -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tasks("regression") @@ -75,12 +75,10 @@ def get_autocorrelation(dw_value, threshold): else: return "No autocorrelation" - results = pd.DataFrame( + return pd.DataFrame( { "dw_statistic": [dw_statistic], "threshold": [str(threshold)], "autocorrelation": [get_autocorrelation(dw_statistic, threshold)], } ) - 
- return results, RawData(residuals=residuals) diff --git a/validmind/tests/model_validation/statsmodels/GINITable.py b/validmind/tests/model_validation/statsmodels/GINITable.py index fb476e35e..5b3b5f1ca 100644 --- a/validmind/tests/model_validation/statsmodels/GINITable.py +++ b/validmind/tests/model_validation/statsmodels/GINITable.py @@ -61,28 +61,19 @@ def GINITable(dataset, model): - The test does not incorporate a method to efficiently handle missing or inefficiently processed data, which could lead to inaccuracies in the metrics if the data is not appropriately preprocessed. """ - - metrics_dict = {"AUC": [], "GINI": [], "KS": []} - - # Retrieve y_true and y_pred for the current dataset y_true = np.ravel(dataset.y) # Flatten y_true to make it one-dimensional y_prob = dataset.y_prob(model) - - # Compute metrics y_true = np.array(y_true, dtype=float) y_prob = np.array(y_prob, dtype=float) fpr, tpr, _ = roc_curve(y_true, y_prob) - ks = max(tpr - fpr) auc = roc_auc_score(y_true, y_prob) gini = 2 * auc - 1 - # Add the metrics to the dictionary - metrics_dict["AUC"].append(auc) - metrics_dict["GINI"].append(gini) - metrics_dict["KS"].append(ks) - - # Create a DataFrame to store and return the results - metrics_df = pd.DataFrame(metrics_dict) - - return metrics_df, RawData(true_positive_rate=tpr, false_positive_rate=fpr) + return pd.DataFrame( + { + "AUC": [auc], + "GINI": [gini], + "KS": [max(tpr - fpr)], + } + ) diff --git a/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py b/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py index ea563f141..f4042d2c7 100644 --- a/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py +++ b/validmind/tests/model_validation/statsmodels/KolmogorovSmirnov.py @@ -4,7 +4,7 @@ from statsmodels.stats.diagnostic import kstest_normal -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import InvalidTestParametersError from validmind.vm_models import VMDataset, VMModel @@ -66,4 +66,4 @@ def KolmogorovSmirnov(model: VMModel, dataset: VMDataset, dist: str = "norm"): "P-Value": result["pvalue"], } for k, result in ks_values.items() - ], RawData(ks_results=ks_values) + ] diff --git a/validmind/tests/model_validation/statsmodels/Lilliefors.py b/validmind/tests/model_validation/statsmodels/Lilliefors.py index fc1c1f029..d17518e55 100644 --- a/validmind/tests/model_validation/statsmodels/Lilliefors.py +++ b/validmind/tests/model_validation/statsmodels/Lilliefors.py @@ -4,13 +4,13 @@ from statsmodels.stats.diagnostic import lilliefors -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel @tags("tabular_data", "data_distribution", "statistical_test", "statsmodels") @tasks("classification", "regression") -def Lilliefors(model: VMModel, dataset: VMDataset): +def Lilliefors(dataset: VMDataset): """ Assesses the normality of feature distributions in an ML model's training dataset using the Lilliefors test. 
@@ -70,4 +70,4 @@ def Lilliefors(model: VMModel, dataset: VMDataset): } ) - return table, RawData(test_statistics=table) + return table diff --git a/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py b/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py index db4f2b152..964ab5aca 100644 --- a/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py +++ b/validmind/tests/model_validation/statsmodels/PredictionProbabilitiesHistogram.py @@ -6,7 +6,7 @@ import plotly.graph_objects as go from matplotlib import cm -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("visualization", "credit_risk") @@ -60,7 +60,7 @@ def PredictionProbabilitiesHistogram( fig = _plot_prob_histogram(df, dataset.target_column, title) - return fig, RawData(probabilities_df=df) + return fig def _plot_prob_histogram(df, target_col, title): diff --git a/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py b/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py index f90070719..70f92b3ac 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py +++ b/validmind/tests/model_validation/statsmodels/RegressionCoeffs.py @@ -7,7 +7,7 @@ import plotly.graph_objects as go from scipy import stats -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.errors import SkipTestError @@ -97,4 +97,4 @@ def RegressionCoeffs(model): yaxis_title="Coefficients", ) - return fig, RawData(coefficients=coefficients) + return fig, coefficients diff --git a/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py b/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py index 4711dced7..3094d51c7 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py +++ b/validmind/tests/model_validation/statsmodels/RegressionFeatureSignificance.py @@ -90,6 +90,4 @@ def RegressionFeatureSignificance( plt.close() - raw_data = RawData(coefficients=coefficients, pvalues=pvalues) - - return fig, raw_data + return fig, RawData(coefficients=coefficients, pvalues=pvalues) diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py index 841e9660c..820202f16 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlot.py @@ -7,7 +7,7 @@ import matplotlib.pyplot as plt import pandas as pd -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.logging import get_logger from validmind.vm_models import VMDataset, VMModel @@ -87,12 +87,4 @@ def RegressionModelForecastPlot( plt.close() - # Prepare raw data - raw_data = RawData( - observed_values=pd.DataFrame({"index": index, "observed": dataset.y}), - forecasted_values=pd.DataFrame( - {"index": index, "forecast": dataset.y_pred(model)} - ), - ) - - return fig, raw_data + return fig diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py index 04bbeb4fa..c634a300d 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelForecastPlotLevels.py @@ -92,6 +92,6 @@ def 
RegressionModelForecastPlotLevels( plt.close() return fig, RawData( - dataset_y_transformed=dataset_y_transformed, + y_transformed=dataset_y_transformed, y_pred_transformed=y_pred_transformed, ) diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py b/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py index f022e9d39..d3635fd0c 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelSensitivityPlot.py @@ -111,4 +111,7 @@ def RegressionModelSensitivityPlot( plt.close() - return fig, RawData(shocked_dfs=shocked_dfs, predictions=predictions) + return fig, RawData( + transformed_target=transformed_target, + transformed_predictions=transformed_predictions, + ) diff --git a/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py b/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py index 7a374a198..62f4ac258 100644 --- a/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py +++ b/validmind/tests/model_validation/statsmodels/RegressionModelSummary.py @@ -4,7 +4,7 @@ from sklearn.metrics import mean_squared_error, r2_score -from validmind import RawData, tags, tasks +from validmind import tags, tasks from validmind.vm_models import VMDataset, VMModel from .statsutils import adj_r2_score @@ -45,19 +45,17 @@ def RegressionModelSummary(dataset: VMDataset, model: VMModel): - A high R-Squared or Adjusted R-Squared may not necessarily indicate a good model, especially in cases of overfitting. """ - y_true = dataset.y - y_pred = dataset.y_pred(model) - - results = [ + return [ { "Independent Variables": dataset.feature_columns, - "R-Squared": r2_score(y_true, y_pred), + "R-Squared": r2_score(dataset.y, dataset.y_pred(model)), "Adjusted R-Squared": adj_r2_score( - y_true, y_pred, len(y_true), len(dataset.feature_columns) + dataset.y, + dataset.y_pred(model), + len(dataset.y), + len(dataset.feature_columns), ), - "MSE": mean_squared_error(y_true=y_true, y_pred=y_pred, squared=True), - "RMSE": mean_squared_error(y_true=y_true, y_pred=y_pred, squared=False), + "MSE": mean_squared_error(dataset.y, dataset.y_pred(model), squared=True), + "RMSE": mean_squared_error(dataset.y, dataset.y_pred(model), squared=False), } ] - - return results, RawData(y_true=y_true, y_pred=y_pred) diff --git a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py index bd4e719fd..29678e5af 100644 --- a/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py +++ b/validmind/tests/model_validation/statsmodels/ScorecardHistogram.py @@ -5,7 +5,7 @@ import plotly.graph_objects as go from matplotlib import cm -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("visualization", "credit_risk", "logistic_regression") @@ -66,7 +66,7 @@ def ScorecardHistogram(dataset, title="Histogram of Scores", score_column="score fig = _plot_score_histogram(df, score_column, dataset.target_column, title) - return fig, RawData(score_data=df[[score_column, dataset.target_column]]) + return fig def _plot_score_histogram(df, score_col, target_col, title): diff --git a/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py b/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py index 0e4035e2e..963f46c43 100644 --- a/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py +++ 
b/validmind/tests/ongoing_monitoring/PredictionAcrossEachFeature.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("visualization") @@ -79,4 +79,4 @@ def PredictionAcrossEachFeature(datasets, model): figures.append(fig) plt.close() - return (*figures, RawData(df_reference=df_reference, df_monitoring=df_monitoring)) + return tuple(figures) diff --git a/validmind/tests/prompt_validation/Bias.py b/validmind/tests/prompt_validation/Bias.py index b5cc6b552..07040426a 100644 --- a/validmind/tests/prompt_validation/Bias.py +++ b/validmind/tests/prompt_validation/Bias.py @@ -108,12 +108,14 @@ def Bias(model, min_threshold=7): passed = score > min_threshold return ( - { - "Score": score, - "Explanation": explanation, - "Threshold": min_threshold, - "Pass/Fail": "Pass" if passed else "Fail", - }, + [ + { + "Score": score, + "Explanation": explanation, + "Threshold": min_threshold, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], passed, RawData(response=response), ) diff --git a/validmind/tests/prompt_validation/Clarity.py b/validmind/tests/prompt_validation/Clarity.py index 44123539f..14721e382 100644 --- a/validmind/tests/prompt_validation/Clarity.py +++ b/validmind/tests/prompt_validation/Clarity.py @@ -96,13 +96,15 @@ def Clarity(model, min_threshold=7): passed = score > min_threshold - table = [ - { - "Score": score, - "Explanation": explanation, - "Threshold": min_threshold, - "Pass/Fail": "Pass" if passed else "Fail", - } - ] - - return (table, passed, RawData(response=response)) + return ( + [ + { + "Score": score, + "Explanation": explanation, + "Threshold": min_threshold, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], + passed, + RawData(response=response), + ) diff --git a/validmind/tests/prompt_validation/NegativeInstruction.py b/validmind/tests/prompt_validation/NegativeInstruction.py index efe2e76ac..ad0039d54 100644 --- a/validmind/tests/prompt_validation/NegativeInstruction.py +++ b/validmind/tests/prompt_validation/NegativeInstruction.py @@ -106,13 +106,16 @@ def NegativeInstruction(model, min_threshold=7): explanation = get_explanation(response) passed = score > min_threshold - result = [ - { - "Score": score, - "Threshold": min_threshold, - "Explanation": explanation, - "Pass/Fail": "Pass" if passed else "Fail", - } - ] - - return result, passed, RawData(model_response=response) + + return ( + [ + { + "Score": score, + "Threshold": min_threshold, + "Explanation": explanation, + "Pass/Fail": "Pass" if passed else "Fail", + } + ], + passed, + RawData(model_response=response), + ) From 4a887731ae1546598fcd9915b6a382fc004893bb Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 16:32:27 -0500 Subject: [PATCH 09/13] chore: linter complaints --- validmind/tests/model_validation/statsmodels/GINITable.py | 2 +- validmind/tests/model_validation/statsmodels/Lilliefors.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/validmind/tests/model_validation/statsmodels/GINITable.py b/validmind/tests/model_validation/statsmodels/GINITable.py index 5b3b5f1ca..5e90dd8d3 100644 --- a/validmind/tests/model_validation/statsmodels/GINITable.py +++ b/validmind/tests/model_validation/statsmodels/GINITable.py @@ -6,7 +6,7 @@ import pandas as pd from sklearn.metrics import roc_auc_score, roc_curve -from validmind import RawData, tags, tasks +from validmind import tags, tasks @tags("model_performance") diff --git a/validmind/tests/model_validation/statsmodels/Lilliefors.py 
b/validmind/tests/model_validation/statsmodels/Lilliefors.py index d17518e55..e51307d71 100644 --- a/validmind/tests/model_validation/statsmodels/Lilliefors.py +++ b/validmind/tests/model_validation/statsmodels/Lilliefors.py @@ -5,7 +5,7 @@ from statsmodels.stats.diagnostic import lilliefors from validmind import tags, tasks -from validmind.vm_models import VMDataset, VMModel +from validmind.vm_models import VMDataset @tags("tabular_data", "data_distribution", "statistical_test", "statsmodels") From ac8b7c5d7f65a14e9ccddf3b2f280a481d74c6bb Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 16:51:20 -0500 Subject: [PATCH 10/13] fix: fixing unit tests --- .../test_IsolationForestOutliers.py | 6 +----- .../data_validation/test_JarqueBera.py | 7 ++----- tests/unit_tests/data_validation/test_KPSS.py | 10 +++------- .../data_validation/test_LJungBox.py | 6 +----- .../data_validation/test_RunsTest.py | 5 +---- .../data_validation/test_ScatterPlot.py | 13 ++---------- .../data_validation/test_ShapiroWilk.py | 8 ++------ .../data_validation/test_Skewness.py | 5 ++--- .../test_TimeSeriesHistogram.py | 13 ++++-------- .../sklearn/test_RegressionErrors.py | 12 ++++------- .../statsmodels/test_DurbinWatsonTest.py | 11 ++-------- .../statsmodels/test_GINITable.py | 11 ++++------ .../test_PredictionProbabilitiesHistogram.py | 15 +++++++------- .../statsmodels/test_RegressionCoeffs.py | 6 +++--- .../statsmodels/test_ScorecardHistogram.py | 20 ++++++++----------- .../test_TimeSeriesPredictionsPlot.py | 10 ++++------ .../test_TimeSeriesR2SquareBySegments.py | 10 +++------- 17 files changed, 53 insertions(+), 115 deletions(-) diff --git a/tests/unit_tests/data_validation/test_IsolationForestOutliers.py b/tests/unit_tests/data_validation/test_IsolationForestOutliers.py index 9d67cb7a6..4a49371b9 100644 --- a/tests/unit_tests/data_validation/test_IsolationForestOutliers.py +++ b/tests/unit_tests/data_validation/test_IsolationForestOutliers.py @@ -6,7 +6,6 @@ IsolationForestOutliers, ) import matplotlib.pyplot as plt -from validmind import RawData class TestIsolationForestOutliers(unittest.TestCase): @@ -35,7 +34,7 @@ def test_outliers_detection(self): self.assertIsInstance(result, tuple) # Separate figures and raw data - *figures, raw_data = result + figures = result # Check that at least one figure is returned self.assertGreater(len(figures), 0) @@ -44,9 +43,6 @@ def test_outliers_detection(self): for fig in figures: self.assertIsInstance(fig, plt.Figure) - # Check raw data - self.assertIsInstance(raw_data, RawData) - def test_feature_columns_validation(self): # Test with valid feature columns try: diff --git a/tests/unit_tests/data_validation/test_JarqueBera.py b/tests/unit_tests/data_validation/test_JarqueBera.py index 6bf29a3c1..bd7bed930 100644 --- a/tests/unit_tests/data_validation/test_JarqueBera.py +++ b/tests/unit_tests/data_validation/test_JarqueBera.py @@ -1,7 +1,7 @@ import unittest import pandas as pd import validmind as vm -from validmind.tests.data_validation.JarqueBera import JarqueBera, RawData +from validmind.tests.data_validation.JarqueBera import JarqueBera class TestJarqueBera(unittest.TestCase): @@ -29,7 +29,7 @@ def test_returns_dataframe_and_rawdata(self): ) # Run the function - result, raw_data = JarqueBera(vm_dataset) + result = JarqueBera(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -40,6 +40,3 @@ def test_returns_dataframe_and_rawdata(self): # Check if the DataFrame has the expected number of rows (one for each numeric 
feature) self.assertEqual(len(result), len(vm_dataset.feature_columns_numeric)) - - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_KPSS.py b/tests/unit_tests/data_validation/test_KPSS.py index 6f4e7b1c6..8d6827350 100644 --- a/tests/unit_tests/data_validation/test_KPSS.py +++ b/tests/unit_tests/data_validation/test_KPSS.py @@ -3,7 +3,6 @@ import numpy as np import validmind as vm from validmind.tests.data_validation.KPSS import KPSS -from validmind import RawData class TestKPSS(unittest.TestCase): @@ -34,7 +33,7 @@ def setUp(self): ) def test_kpss_structure(self): - result, raw_data = KPSS(self.vm_dataset) + result = KPSS(self.vm_dataset) # Check basic structure self.assertIsInstance(result, dict) @@ -51,11 +50,8 @@ def test_kpss_structure(self): self.assertIn("usedlag", column_result) self.assertIn("critical_values", column_result) - # Check raw data instance - self.assertIsInstance(raw_data, RawData) - def test_kpss_results(self): - result, _ = KPSS(self.vm_dataset) + result = KPSS(self.vm_dataset) kpss_results = result["KPSS Test Results"] # Get results for each series @@ -78,7 +74,7 @@ def test_kpss_results(self): self.assertLess(stationary_result["stat"], non_stationary_result["stat"]) def test_critical_values(self): - result, _ = KPSS(self.vm_dataset) + result = KPSS(self.vm_dataset) kpss_results = result["KPSS Test Results"] for column_result in kpss_results: diff --git a/tests/unit_tests/data_validation/test_LJungBox.py b/tests/unit_tests/data_validation/test_LJungBox.py index 974ec64f4..3880dcf3d 100644 --- a/tests/unit_tests/data_validation/test_LJungBox.py +++ b/tests/unit_tests/data_validation/test_LJungBox.py @@ -2,7 +2,6 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.LJungBox import LJungBox -from validmind import RawData class TestLJungBox(unittest.TestCase): @@ -23,7 +22,7 @@ def test_returns_dataframe_with_expected_shape(self): ) # Run the function - result, raw_data = LJungBox(vm_dataset) + result = LJungBox(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -34,6 +33,3 @@ def test_returns_dataframe_with_expected_shape(self): # Check if the DataFrame has the expected number of rows (one for each column) self.assertEqual(len(result), len(df.columns)) - - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/data_validation/test_RunsTest.py b/tests/unit_tests/data_validation/test_RunsTest.py index 215bf2066..7666c9abf 100644 --- a/tests/unit_tests/data_validation/test_RunsTest.py +++ b/tests/unit_tests/data_validation/test_RunsTest.py @@ -32,7 +32,7 @@ def test_returns_dataframe_and_raw_data(self): ) # Run the function - result, raw_data = RunsTest(vm_dataset) + result = RunsTest(vm_dataset) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) @@ -43,6 +43,3 @@ def test_returns_dataframe_and_raw_data(self): # Check if the DataFrame has the expected number of rows (one for each numeric feature) self.assertEqual(len(result), len(vm_dataset.feature_columns_numeric)) - - # Check if raw_data is instance of RawData - self.assertIsInstance(raw_data, vm.RawData) diff --git a/tests/unit_tests/data_validation/test_ScatterPlot.py b/tests/unit_tests/data_validation/test_ScatterPlot.py index e171bef8c..c5034a59b 100644 --- a/tests/unit_tests/data_validation/test_ScatterPlot.py +++ 
b/tests/unit_tests/data_validation/test_ScatterPlot.py @@ -32,19 +32,10 @@ def setUp(self): def test_returns_tuple_of_figures_and_raw_data(self): # Run the function - result = ScatterPlot(self.vm_dataset) - - # Check if result is a tuple - self.assertIsInstance(result, tuple) - - # Check if the tuple contains exactly two elements - self.assertEqual(len(result), 2) + figure = ScatterPlot(self.vm_dataset) # Check if the first element is a matplotlib Figure - self.assertIsInstance(result[0], plt.Figure) - - # Check if the second element is an instance of RawData - self.assertIsInstance(result[1], vm.RawData) + self.assertIsInstance(figure, plt.Figure) # Check if all figures are properly closed self.assertEqual(len(plt.get_fignums()), 0) diff --git a/tests/unit_tests/data_validation/test_ShapiroWilk.py b/tests/unit_tests/data_validation/test_ShapiroWilk.py index b06da95e7..59989998e 100644 --- a/tests/unit_tests/data_validation/test_ShapiroWilk.py +++ b/tests/unit_tests/data_validation/test_ShapiroWilk.py @@ -3,7 +3,6 @@ import pandas as pd import validmind as vm from validmind.tests.data_validation.ShapiroWilk import ShapiroWilk -from validmind import RawData class TestShapiroWilk(unittest.TestCase): @@ -31,7 +30,7 @@ def setUp(self): def test_returns_dataframe_and_rawdata(self): # Run the function - result_df, result_rawdata = ShapiroWilk(self.vm_dataset) + result_df = ShapiroWilk(self.vm_dataset) # Check if result_df is a DataFrame self.assertIsInstance(result_df, pd.DataFrame) @@ -43,12 +42,9 @@ def test_returns_dataframe_and_rawdata(self): # Check if the DataFrame has the expected number of rows (one for each numeric feature) self.assertEqual(len(result_df), len(self.vm_dataset.feature_columns_numeric)) - # Check if result_rawdata is an instance of RawData - self.assertIsInstance(result_rawdata, RawData) - def test_handles_different_distributions(self): # Run the function - result_df, _ = ShapiroWilk(self.vm_dataset) + result_df = ShapiroWilk(self.vm_dataset) # The normal distribution should have a higher p-value than the exponential distribution normal_pvalue = result_df[result_df["column"] == "normal_dist"]["pvalue"].iloc[ diff --git a/tests/unit_tests/data_validation/test_Skewness.py b/tests/unit_tests/data_validation/test_Skewness.py index b366ebdc2..1ea08bc21 100644 --- a/tests/unit_tests/data_validation/test_Skewness.py +++ b/tests/unit_tests/data_validation/test_Skewness.py @@ -38,12 +38,11 @@ def setUp(self): def test_skewness_threshold(self): # Test with default threshold (1) - results, passed, raw_data = Skewness(self.vm_dataset) + results, passed = Skewness(self.vm_dataset) # Check return types self.assertIsInstance(results, dict) self.assertIn(passed, [True, False]) - self.assertIsInstance(raw_data, RawData) # Check results structure results_table = results["Skewness Results for Dataset"] @@ -62,7 +61,7 @@ def test_skewness_threshold(self): def test_custom_threshold(self): # Test with very high threshold (all should pass) - results, passed, raw_data = Skewness(self.vm_dataset, max_threshold=10) + results, passed = Skewness(self.vm_dataset, max_threshold=10) results_table = results["Skewness Results for Dataset"] # All columns should pass with high threshold diff --git a/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py b/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py index 245b27db7..cb7108c61 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesHistogram.py @@ -4,7 +4,6 @@ 
import plotly.graph_objects as go import validmind as vm from validmind.tests.data_validation.TimeSeriesHistogram import TimeSeriesHistogram -from validmind import RawData class TestTimeSeriesHistogram(unittest.TestCase): @@ -51,15 +50,11 @@ def test_returns_tuple_of_figures_and_raw_data(self): # Check if result is a tuple self.assertIsInstance(result, tuple) - # Check if all elements except the last one in the tuple are Plotly Figures - for fig in result[:-1]: - self.assertIsInstance(fig, go.Figure) - - # The last element should be RawData - self.assertIsInstance(result[-1], RawData) + # Should have one histogram per column + self.assertEqual(len(result), len(self.df.columns)) - # Should have one histogram per column plus one RawData object - self.assertEqual(len(result), len(self.df.columns) + 1) + for fig in result: + self.assertIsInstance(fig, go.Figure) def test_histogram_properties(self): result = TimeSeriesHistogram(self.vm_dataset) diff --git a/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py b/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py index 979934c1e..aced18ed8 100644 --- a/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py +++ b/tests/unit_tests/model_validation/sklearn/test_RegressionErrors.py @@ -3,7 +3,6 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm -from validmind import RawData from validmind.tests.model_validation.sklearn.RegressionErrors import RegressionErrors @@ -49,7 +48,7 @@ def setUp(self): def test_returns_dataframe_and_raw_data(self): # Run the function - results, raw_data = RegressionErrors(self.vm_model, self.vm_dataset) + results = RegressionErrors(self.vm_model, self.vm_dataset) # Check if results is a DataFrame self.assertIsInstance(results, pd.DataFrame) @@ -67,11 +66,8 @@ def test_returns_dataframe_and_raw_data(self): # Check if DataFrame has exactly one row self.assertEqual(len(results), 1) - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) - def test_error_metrics_range(self): - results, _ = RegressionErrors(self.vm_model, self.vm_dataset) + results = RegressionErrors(self.vm_model, self.vm_dataset) # All error metrics should be non-negative (except MBD) self.assertGreaterEqual(results["Mean Absolute Error (MAE)"].iloc[0], 0) @@ -113,7 +109,7 @@ def test_perfect_prediction(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate errors - results, _ = RegressionErrors(vm_perfect_model, vm_perfect_dataset) + results = RegressionErrors(vm_perfect_model, vm_perfect_dataset) # All error metrics should be very close to 0 self.assertAlmostEqual( @@ -128,7 +124,7 @@ def test_perfect_prediction(self): ) def test_error_metrics_consistency(self): - results, _ = RegressionErrors(self.vm_model, self.vm_dataset) + results = RegressionErrors(self.vm_model, self.vm_dataset) # MSE should be greater than or equal to MAE squared mae = results["Mean Absolute Error (MAE)"].iloc[0] diff --git a/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py b/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py index 6554c931a..6388ece40 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py +++ b/tests/unit_tests/model_validation/statsmodels/test_DurbinWatsonTest.py @@ -3,7 +3,6 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm -from validmind import RawData from 
validmind.tests.model_validation.statsmodels.DurbinWatsonTest import ( DurbinWatsonTest, ) @@ -64,7 +63,7 @@ def setUp(self): def test_returns_dataframe_and_rawdata(self): # Run the function - results, raw_data = DurbinWatsonTest(self.vm_dataset, self.vm_model) + results = DurbinWatsonTest(self.vm_dataset, self.vm_model) # Check if results is a DataFrame self.assertIsInstance(results, pd.DataFrame) @@ -76,9 +75,6 @@ def test_returns_dataframe_and_rawdata(self): # Check if DataFrame has exactly one row self.assertEqual(len(results), 1) - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) - def test_no_autocorrelation(self): # Create a dataset with no autocorrelation n_samples = 100 @@ -111,7 +107,7 @@ def test_no_autocorrelation(self): vm_no_auto_dataset.assign_predictions(vm_no_auto_model) # Run the function - results, raw_data = DurbinWatsonTest(vm_no_auto_dataset, vm_no_auto_model) + results = DurbinWatsonTest(vm_no_auto_dataset, vm_no_auto_model) # Check if results is a DataFrame self.assertIsInstance(results, pd.DataFrame) @@ -135,6 +131,3 @@ def test_no_autocorrelation(self): ) self.assertEqual(results["autocorrelation"].iloc[0], "No autocorrelation") self.assertEqual(results["threshold"].iloc[0], "[1.5, 2.5]") - - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) diff --git a/tests/unit_tests/model_validation/statsmodels/test_GINITable.py b/tests/unit_tests/model_validation/statsmodels/test_GINITable.py index 91c3e5980..bb44381d0 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_GINITable.py +++ b/tests/unit_tests/model_validation/statsmodels/test_GINITable.py @@ -56,14 +56,11 @@ def setUp(self): def test_returns_dataframe_and_rawdata(self): # Run the function - result, raw_data = GINITable(self.vm_dataset, self.vm_model) + result = GINITable(self.vm_dataset, self.vm_model) # Check if result is a DataFrame self.assertIsInstance(result, pd.DataFrame) - # Check if raw_data is RawData instance - self.assertIsInstance(raw_data, RawData) - # Check if DataFrame has expected columns expected_columns = ["AUC", "GINI", "KS"] self.assertTrue(all(col in result.columns for col in expected_columns)) @@ -103,7 +100,7 @@ def test_perfect_separation(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate metrics - result, _ = GINITable(vm_perfect_dataset, vm_perfect_model) + result = GINITable(vm_perfect_dataset, vm_perfect_model) # For perfect separation: # - AUC should be 1.0 @@ -147,7 +144,7 @@ def test_random_prediction(self): vm_random_dataset.assign_predictions(vm_random_model) # Calculate metrics - result, _ = GINITable(vm_random_dataset, vm_random_model) + result = GINITable(vm_random_dataset, vm_random_model) # For random predictions: # - AUC should be close to 0.5 @@ -161,7 +158,7 @@ def test_random_prediction(self): def test_metric_ranges(self): # Test regular case - result, _ = GINITable(self.vm_dataset, self.vm_model) + result = GINITable(self.vm_dataset, self.vm_model) # Check metric ranges # AUC should be between 0 and 1 diff --git a/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py b/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py index 2f87bb1ca..cd55a22a4 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py +++ b/tests/unit_tests/model_validation/statsmodels/test_PredictionProbabilitiesHistogram.py @@ -58,14 +58,13 @@ def setUp(self): def 
test_returns_figure_and_raw_data(self): # Run the function - result = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) + figure = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) # Check if result contains a Plotly Figure and RawData - self.assertIsInstance(result[0], go.Figure) - self.assertIsInstance(result[1], vm.RawData) + self.assertIsInstance(figure, go.Figure) # Check if figure has traces - self.assertGreater(len(result[0].data), 0) + self.assertGreater(len(figure.data), 0) def test_perfect_separation(self): # Create a dataset with perfect class separation @@ -99,15 +98,15 @@ def test_perfect_separation(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Generate histogram - result = PredictionProbabilitiesHistogram(vm_perfect_dataset, vm_perfect_model) + figure = PredictionProbabilitiesHistogram(vm_perfect_dataset, vm_perfect_model) # Check if there are exactly two traces (one for each class) - self.assertEqual(len(result[0].data), 2) + self.assertEqual(len(figure.data), 2) def test_probability_ranges(self): - result = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) + figure = PredictionProbabilitiesHistogram(self.vm_dataset, self.vm_model) # Check if probabilities are within [0, 1] range - for trace in result[0].data: + for trace in figure.data: x_values = trace.x self.assertTrue(all(0 <= x <= 1 for x in x_values)) diff --git a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py index ba85b3366..61cc15936 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py +++ b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py @@ -4,7 +4,6 @@ import statsmodels.api as sm import validmind as vm import plotly.graph_objects as go -from validmind import RawData from validmind.tests.model_validation.statsmodels.RegressionCoeffs import ( RegressionCoeffs, ) @@ -53,8 +52,9 @@ def test_returns_tuple(self): # Check if first element is a Plotly Figure self.assertIsInstance(result[0], go.Figure) - # Check if second element is an instance of RawData - self.assertIsInstance(result[1], RawData) + # Check if second element is a table (list of dicts) + self.assertIsInstance(result[1], list) + self.assertIsInstance(result[1][0], dict) def test_plot_properties(self): # Run the function diff --git a/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py b/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py index ed6819034..2e471805c 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py +++ b/tests/unit_tests/model_validation/statsmodels/test_ScorecardHistogram.py @@ -5,7 +5,6 @@ import plotly.graph_objects as go from validmind.tests.model_validation.statsmodels.ScorecardHistogram import ( ScorecardHistogram, - RawData, ) @@ -52,16 +51,13 @@ def setUp(self): def test_returns_figure_and_raw_data(self): # Run the function - result_figure, result_raw_data = ScorecardHistogram(self.vm_dataset) + figure = ScorecardHistogram(self.vm_dataset) # Check if the first part of the result is a Plotly Figure - self.assertIsInstance(result_figure, go.Figure) + self.assertIsInstance(figure, go.Figure) # Check if figure has traces - self.assertGreater(len(result_figure.data), 0) - - # Check if the second part of the result is RawData - self.assertIsInstance(result_raw_data, RawData) + self.assertGreater(len(figure.data), 0) def test_missing_score_column(self): 
# Create dataset without score column @@ -78,12 +74,12 @@ def test_missing_score_column(self): ScorecardHistogram(vm_dataset_no_score) def test_histogram_properties(self): - result_figure, _ = ScorecardHistogram(self.vm_dataset) + figure = ScorecardHistogram(self.vm_dataset) # Should have two traces (one for each class) - self.assertEqual(len(result_figure.data), 2) + self.assertEqual(len(figure.data), 2) - for trace in result_figure.data: + for trace in figure.data: # Check if trace type is histogram self.assertEqual(trace.type, "histogram") @@ -93,13 +89,13 @@ def test_histogram_properties(self): def test_class_separation(self): # Now test the visualization - result_figure, _ = ScorecardHistogram(self.vm_dataset) + figure = ScorecardHistogram(self.vm_dataset) # Get scores for each class from the traces class_0_scores = None class_1_scores = None - for trace in result_figure.data: + for trace in figure.data: if "target = 0" in trace.name: class_0_scores = np.array(trace.x) elif "target = 1" in trace.name: diff --git a/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py b/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py index 2f4a4d74e..5437f365d 100644 --- a/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py +++ b/tests/unit_tests/model_validation/test_TimeSeriesPredictionsPlot.py @@ -51,20 +51,18 @@ def setUp(self): def test_return_types(self): """Test if function returns a tuple with a Plotly figure and raw data.""" result = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) - self.assertIsInstance(result, tuple) - self.assertIsInstance(result[0], go.Figure) - self.assertIsInstance(result[1], vm.RawData) + self.assertIsInstance(result, go.Figure) def test_figure_properties(self): """Test if figure has expected properties.""" - fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Check if figure has exactly two traces (Actual and Predicted) self.assertEqual(len(fig.data), 2) def test_data_length(self): """Test if the plotted data has correct length.""" - fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Both traces should have same length as input data self.assertEqual(len(fig.data[0].x), len(self.df)) @@ -74,7 +72,7 @@ def test_data_length(self): def test_datetime_index(self): """Test if x-axis uses datetime values.""" - fig, _ = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) + fig = TimeSeriesPredictionsPlot(self.vm_dataset, self.vm_model) # Check if x values are datetime objects self.assertTrue( diff --git a/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py b/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py index edc6c4b07..9d339e829 100644 --- a/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py +++ b/tests/unit_tests/model_validation/test_TimeSeriesR2SquareBySegments.py @@ -2,7 +2,6 @@ import pandas as pd import numpy as np import plotly.graph_objects as go -from datetime import datetime import validmind as vm from validmind.tests.model_validation.TimeSeriesR2SquareBySegments import ( TimeSeriesR2SquareBySegments, @@ -47,18 +46,15 @@ def setUp(self): def test_return_types(self): """Test if function returns expected types.""" - fig, results_df, raw_data = TimeSeriesR2SquareBySegments( - self.vm_dataset, self.vm_model - ) + fig, results_df = TimeSeriesR2SquareBySegments(self.vm_dataset, 
self.vm_model) # Check return types self.assertIsInstance(fig, go.Figure) self.assertIsInstance(results_df, pd.DataFrame) - self.assertIsInstance(raw_data, vm.RawData) def test_results_dataframe(self): """Test if results DataFrame has expected structure.""" - _, results_df, _ = TimeSeriesR2SquareBySegments(self.vm_dataset, self.vm_model) + _, results_df = TimeSeriesR2SquareBySegments(self.vm_dataset, self.vm_model) # Check columns expected_columns = ["Segments", "Start Date", "End Date", "R-Squared"] @@ -82,7 +78,7 @@ def test_custom_segments(self): "end_date": [dates[32], dates[65], dates[-1]], } - _, results_df, _ = TimeSeriesR2SquareBySegments( + _, results_df = TimeSeriesR2SquareBySegments( self.vm_dataset, self.vm_model, segments=custom_segments ) From f4159bfb9b2014ac6391b54581f1e39478c2bad8 Mon Sep 17 00:00:00 2001 From: John Walz Date: Thu, 9 Jan 2025 16:59:05 -0500 Subject: [PATCH 11/13] fix: should be the last fixes to unit tests --- .../data_validation/test_TimeSeriesLinePlot.py | 12 +++--------- .../data_validation/test_TimeSeriesOutliers.py | 4 +--- .../data_validation/test_ZivotAndrewsArch.py | 6 +----- .../sklearn/test_FeatureImportance.py | 12 ++++-------- .../sklearn/test_RegressionR2SquareComparison.py | 14 ++++---------- .../statsmodels/test_RegressionCoeffs.py | 5 ++--- 6 files changed, 15 insertions(+), 38 deletions(-) diff --git a/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py b/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py index a9bea6dd5..a4dae25b1 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesLinePlot.py @@ -2,7 +2,6 @@ import pandas as pd import validmind as vm import plotly.graph_objs as go -from validmind import RawData from validmind.errors import SkipTestError from validmind.tests.data_validation.TimeSeriesLinePlot import TimeSeriesLinePlot @@ -36,17 +35,12 @@ def test_time_series_line_plot(self): # Check that we get the correct number of figures plus raw data (one per feature + RawData) self.assertIsInstance(figures, tuple) - self.assertEqual( - len(figures), 3 - ) # Should have 2 figures for A and B and 1 RawData + self.assertEqual(len(figures), 2) # Should have 2 figures for A and B - # Check that the first two outputs are plotly figures - for fig in figures[:2]: + # Check that the figures are plotly figures + for fig in figures: self.assertIsInstance(fig, go.Figure) - # Check that the last output is RawData - self.assertIsInstance(figures[-1], RawData) - def test_no_datetime_index(self): # Should raise SkipTestError when no datetime index present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py b/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py index dc3953f22..193653a91 100644 --- a/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py +++ b/tests/unit_tests/data_validation/test_TimeSeriesOutliers.py @@ -5,7 +5,6 @@ import plotly.graph_objs as go from validmind.errors import SkipTestError from validmind.tests.data_validation.TimeSeriesOutliers import TimeSeriesOutliers -from validmind import RawData class TestTimeSeriesOutliers(unittest.TestCase): @@ -37,13 +36,12 @@ def setUp(self): ) def test_time_series_outliers(self): - outlier_df, figures, passed, raw_data = TimeSeriesOutliers(self.vm_dataset) + outlier_df, figures, passed = TimeSeriesOutliers(self.vm_dataset) # Check return types self.assertIsInstance(outlier_df, pd.DataFrame) self.assertIsInstance(figures, list) 
self.assertIsInstance(passed, bool) - self.assertIsInstance(raw_data, RawData) # Check that we have the expected number of figures (one per feature) self.assertEqual(len(figures), 2) diff --git a/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py b/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py index c6e9c588c..97e7716c7 100644 --- a/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py +++ b/tests/unit_tests/data_validation/test_ZivotAndrewsArch.py @@ -4,7 +4,6 @@ import validmind as vm from validmind.errors import SkipTestError from validmind.tests.data_validation.ZivotAndrewsArch import ZivotAndrewsArch -from validmind import RawData class TestZivotAndrewsArch(unittest.TestCase): @@ -40,7 +39,7 @@ def setUp(self): ) def test_zivot_andrews(self): - result, raw_data = ZivotAndrewsArch(self.vm_dataset) + result = ZivotAndrewsArch(self.vm_dataset) # Check return type and structure self.assertIsInstance(result, dict) @@ -57,9 +56,6 @@ def test_zivot_andrews(self): for field in required_fields: self.assertIn(field, value) - # Check raw data - self.assertIsInstance(raw_data, RawData) - def test_no_datetime_index(self): # Should raise SkipTestError when no datetime index present with self.assertRaises(SkipTestError): diff --git a/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py b/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py index 8a2d71d02..7cd346336 100644 --- a/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py +++ b/tests/unit_tests/model_validation/sklearn/test_FeatureImportance.py @@ -3,7 +3,6 @@ import numpy as np from sklearn.linear_model import LinearRegression import validmind as vm -from validmind import RawData from validmind.tests.model_validation.sklearn.FeatureImportance import FeatureImportance @@ -59,7 +58,7 @@ def setUp(self): def test_returns_dataframe_and_rawdata(self): # Run the function - result_df, raw_data = FeatureImportance(self.vm_dataset, self.vm_model) + result_df = FeatureImportance(self.vm_dataset, self.vm_model) # Check if result_df is a DataFrame self.assertIsInstance(result_df, pd.DataFrame) @@ -68,12 +67,9 @@ def test_returns_dataframe_and_rawdata(self): expected_columns = ["Feature 1", "Feature 2", "Feature 3"] self.assertTrue(all(col in result_df.columns for col in expected_columns)) - # Check if raw_data is an instance of RawData - self.assertIsInstance(raw_data, RawData) - def test_feature_importance_ranking(self): # Run with all features - result_df, _ = FeatureImportance(self.vm_dataset, self.vm_model, num_features=4) + result_df = FeatureImportance(self.vm_dataset, self.vm_model, num_features=4) # Get feature names and scores features = [] @@ -91,7 +87,7 @@ def test_feature_importance_ranking(self): def test_num_features_parameter(self): # Test with different num_features values for num_features in [2, 3, 4]: - result_df, _ = FeatureImportance( + result_df = FeatureImportance( self.vm_dataset, self.vm_model, num_features=num_features ) @@ -102,7 +98,7 @@ def test_num_features_parameter(self): self.assertEqual(len(feature_columns), num_features) def test_feature_importance_scores(self): - result_df, _ = FeatureImportance(self.vm_dataset, self.vm_model) + result_df = FeatureImportance(self.vm_dataset, self.vm_model) # Get first feature score first_feature = result_df["Feature 1"].iloc[0] diff --git a/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py b/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py index 
ae936193d..ee77ededd 100644 --- a/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py +++ b/tests/unit_tests/model_validation/sklearn/test_RegressionR2SquareComparison.py @@ -4,7 +4,6 @@ from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor import validmind as vm -from validmind import RawData from validmind.tests.model_validation.sklearn.RegressionR2SquareComparison import ( RegressionR2SquareComparison, ) @@ -89,7 +88,7 @@ def setUp(self): def test_returns_dataframe(self): # Run the function - result_df, raw_data = RegressionR2SquareComparison( + result_df = RegressionR2SquareComparison( [self.vm_dataset1, self.vm_dataset2], [self.vm_model1, self.vm_model2] ) @@ -103,9 +102,6 @@ def test_returns_dataframe(self): # Check if DataFrame has correct number of rows (2 datasets * 2 models) self.assertEqual(len(result_df), 2) - # Check raw data - self.assertIsInstance(raw_data, RawData) - def test_perfect_prediction(self): # Create a perfect prediction scenario perfect_df = pd.DataFrame( @@ -138,7 +134,7 @@ def test_perfect_prediction(self): vm_perfect_dataset.assign_predictions(vm_perfect_model) # Calculate R2 scores - result_df, raw_data = RegressionR2SquareComparison( + result_df = RegressionR2SquareComparison( [vm_perfect_dataset], [vm_perfect_model] ) @@ -148,7 +144,7 @@ def test_perfect_prediction(self): def test_model_comparison(self): # Compare linear model vs random forest on non-linear dataset - result_df, raw_data = RegressionR2SquareComparison( + result_df = RegressionR2SquareComparison( [self.vm_dataset2, self.vm_dataset2], [self.vm_model1, self.vm_model2] ) @@ -192,9 +188,7 @@ def test_poor_prediction(self): vm_poor_dataset.assign_predictions(vm_poor_model) # Calculate R2 scores - result_df, raw_data = RegressionR2SquareComparison( - [vm_poor_dataset], [vm_poor_model] - ) + result_df = RegressionR2SquareComparison([vm_poor_dataset], [vm_poor_model]) # R2 scores should be close to 0 for poor predictions self.assertLess(result_df["R-Squared"].iloc[0], 0.1) diff --git a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py index 61cc15936..88f0448f7 100644 --- a/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py +++ b/tests/unit_tests/model_validation/statsmodels/test_RegressionCoeffs.py @@ -52,9 +52,8 @@ def test_returns_tuple(self): # Check if first element is a Plotly Figure self.assertIsInstance(result[0], go.Figure) - # Check if second element is a table (list of dicts) - self.assertIsInstance(result[1], list) - self.assertIsInstance(result[1][0], dict) + # Check if second element is a table (DataFrame) + self.assertIsInstance(result[1], pd.DataFrame) def test_plot_properties(self): # Run the function From 6aaf394c3d7ede580688a0643db50a91c6eb317b Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Tue, 14 Jan 2025 08:07:06 -0800 Subject: [PATCH 12/13] Fix segfault on Mac --- tests/test_unit_tests.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/test_unit_tests.py b/tests/test_unit_tests.py index 73ceb884f..0d7d532f8 100644 --- a/tests/test_unit_tests.py +++ b/tests/test_unit_tests.py @@ -11,6 +11,17 @@ print_coverage_statistics, ) +# Limit OpenMP on Mac so it doesn't segfault: +# +# By limiting OpenMP to a single thread (OMP_NUM_THREADS=1), we: +# • Prevent nested parallelism from creating too many threads. 
+# • Simplify thread management, avoiding conflicts or resource contention. +# • Allow other threading backends (e.g., Apple’s libdispatch or PyTorch's +# thread pool) to manage parallelism more predictably. +if sys.platform == "darwin": + os.environ["OMP_NUM_THREADS"] = "1" + os.environ["MKL_NUM_THREADS"] = "1" + logger = get_logger(__name__) KNOWN_FAILING_TESTS = [ From 538725867120672a68872936cde22f20ed751562 Mon Sep 17 00:00:00 2001 From: John Walz Date: Tue, 14 Jan 2025 11:59:30 -0500 Subject: [PATCH 13/13] chore: remove old vm test unit tests --- tests/test_integration_tests.py | 302 -------------------------------- 1 file changed, 302 deletions(-) delete mode 100644 tests/test_integration_tests.py diff --git a/tests/test_integration_tests.py b/tests/test_integration_tests.py deleted file mode 100644 index 2107e4848..000000000 --- a/tests/test_integration_tests.py +++ /dev/null @@ -1,302 +0,0 @@ -# """This is a test harness to run unit tests against the ValidMind tests""" - -# import os -# import time -# import unittest -# from fnmatch import fnmatch - -# import matplotlib.pyplot as plt - -# from tabulate import tabulate -# from tqdm import tqdm -# from validmind.logging import get_logger -# from validmind.tests import list_tests, load_test, run_test -# from validmind.vm_models.result import TestResult - -# from run_test_utils import ( -# setup_clustering_test_inputs, -# setup_embeddings_test_inputs, -# setup_summarization_test_inputs, -# setup_tabular_test_inputs, -# setup_time_series_test_inputs, -# ) - - -# logger = get_logger(__name__) -# # Override plt.show to do nothing -# plt.show = lambda: None - -# # These tests are expected to fail and need to be fixed -# KNOWN_FAILING_TESTS = [ -# # Only statsmodels are supported for these metrics -# "validmind.model_validation.statsmodels.RegressionCoeffs", -# "validmind.model_validation.statsmodels.RegressionFeatureSignificance", -# # The number of observations is too small to use the Zivot-Andrews test -# "validmind.data_validation.ZivotAndrewsArch", -# # These tests can be turned into comparison tests: -# "validmind.model_validation.embeddings.CosineSimilarityComparison", -# "validmind.model_validation.embeddings.EuclideanDistanceComparison", -# # This is a base test class so it doesn't run on its own -# "validmind.model_validation.sklearn.ClusterPerformance", -# # ValueError: The `cluster_column` parameter must be provided -# "validmind.model_validation.embeddings.EmbeddingsVisualization2D", -# # These tests have dependencies that are not installed by default -# "validmind.data_validation.ProtectedClassesCombination", -# "validmind.data_validation.ProtectedClassesDisparity", -# "validmind.data_validation.ProtectedClassesThresholdOptimizer", -# # The customer churn classification dataset contains a string column 'Germany' which is not supported by the MutualInformation test -# "validmind.data_validation.MutualInformation", -# # The required column 'score' is not present in the dataset with input_id test_dataset -# "validmind.data_validation.ScoreBandDefaultRates", -# "validmind.model_validation.sklearn.ScoreProbabilityAlignment", -# ] -# SKIPPED_TESTS = [] -# SUCCESSFUL_TESTS = [] - -# # Harcode some tests that require specific inputs instead of trying to -# # guess from tags or tasks -# CUSTOM_TEST_INPUT_ASSIGNMENTS = { -# "validmind.data_validation.DatasetDescription": "classification", -# "validmind.data_validation.DatasetSplit": "classification", -# "validmind.model_validation.ModelMetadata": "classification", -# } - -# # Some 
tests require specific configurations. This is either expected and required -# # or we need to fix these tests so they can run with sane defaults -# # -# # Here we assign config param keys to each test and then let the test runner know -# # how to load the config for that test -# TEST_TO_PARAMS_CONFIG = { -# # TODO: features_pairs should default to all input dataset pairs -# "validmind.model_validation.statsmodels.ScorecardHistogram": "score_column", -# # TODO: "ValueError: perplexity must be less than n_samples if using defaults" -# "validmind.model_validation.embeddings.TSNEComponentsPairwisePlots": "t_sne_config", -# "validmind.model_validation.sklearn.KMeansClustersOptimization": "kmeans_config", -# "validmind.model_validation.sklearn.HyperParametersTuning": "hyperparameter_tuning_config", -# "validmind.model_validation.embeddings.StabilityAnalysisKeyword": "stability_analysis_keyword_config", -# } - -# # Global inputs and configurations for the tests -# TEST_CONFIG = {} -# TEST_INPUTS = {} - - -# class TestRunTest(unittest.TestCase): -# pass - - -# def create_unit_test_func(vm_test_id, test_func): -# def unit_test_func(self): -# self.assertTrue( -# hasattr(test_func, "inputs"), -# f"{vm_test_id} missing required inputs", -# ) -# self.assertTrue( -# hasattr(test_func, "__tasks__"), -# f"{vm_test_id} missing tasks in metadata", -# ) -# self.assertTrue( -# hasattr(test_func, "__tags__"), -# f"{vm_test_id} missing tags in metadata", -# ) - -# required_inputs = sorted(test_func.inputs) -# if required_inputs == ["datasets", "models"]: -# logger.debug( -# "Skipping test - multi-(dataset,model) tests are not supported at the moment %s", -# vm_test_id, -# ) -# SKIPPED_TESTS.append(vm_test_id) -# return - -# if "llm" in test_func.__tags__ and "embeddings" not in test_func.__tags__: -# logger.debug( -# "--- Skipping test - LLM tests not supported yet %s", -# vm_test_id, -# ) -# SKIPPED_TESTS.append(vm_test_id) -# return - -# logger.debug(">>> Running test %s", vm_test_id) - -# # Assume we'll load the classification (tabular) inputs in most cases -# custom_test_input_assignment = CUSTOM_TEST_INPUT_ASSIGNMENTS.get(vm_test_id) -# selected_test_inputs = None - -# if custom_test_input_assignment: -# selected_test_inputs = custom_test_input_assignment -# elif "clustering" in test_func.__tasks__: -# selected_test_inputs = "clustering" -# elif "embeddings" in test_func.__tags__: -# selected_test_inputs = "embeddings" -# elif ( -# "text_summarization" in test_func.__tasks__ or "nlp" in test_func.__tasks__ -# ): -# selected_test_inputs = "text_summarization" -# elif "time_series_data" in test_func.__tags__: -# selected_test_inputs = "time_series" -# else: -# selected_test_inputs = "classification" - -# inputs = TEST_INPUTS[selected_test_inputs] - -# # Build the single test inputs according to the required inputs -# single_test_inputs = {} -# if required_inputs == ["dataset"]: -# single_test_inputs = inputs["single_dataset"] -# elif required_inputs == ["dataset", "model"]: -# single_test_inputs = inputs["model_and_dataset"] -# elif required_inputs == ["datasets"]: -# single_test_inputs = inputs["two_datasets"] -# elif required_inputs == ["datasets", "model"]: -# single_test_inputs = inputs["model_and_two_datasets"] -# elif required_inputs == ["models"]: -# single_test_inputs = inputs["two_models"] -# elif required_inputs == ["dataset", "models"]: -# single_test_inputs = inputs["dataset_and_two_models"] -# elif required_inputs == ["model"]: -# single_test_inputs = inputs["single_model"] - -# test_kwargs = { 
-# "test_id": vm_test_id, -# "inputs": single_test_inputs, -# "show": False, -# "generate_description": False, -# } - -# # Check if the test requires a specific configuration -# if vm_test_id in TEST_TO_PARAMS_CONFIG: -# key = TEST_TO_PARAMS_CONFIG.get(vm_test_id) -# if key in TEST_CONFIG: -# test_config = TEST_CONFIG.get(key) -# # Only set the config if it's not None -# if test_config: -# test_kwargs["params"] = test_config -# else: -# logger.error( -# "Skipping test %s - missing expected configuration for %s", -# vm_test_id, -# key, -# ) -# SKIPPED_TESTS.append(vm_test_id) -# return - -# print(f"Running test {vm_test_id}...") -# start_time = time.time() -# result = run_test(**test_kwargs) -# end_time = time.time() -# execution_time = round(end_time - start_time, 2) - -# self.assertTrue( -# isinstance(result, TestResult), -# f"Expected TestResult, got {type(result)}", -# ) -# self.assertEqual( -# result.result_id, -# vm_test_id, -# f"Expected result_id to be {vm_test_id}, got {result.result_id}", -# ) - -# # Finally, the test worked so we can add it to the list of successful tests -# # and note the time it took to run -# SUCCESSFUL_TESTS.append( -# { -# "test_id": vm_test_id, -# "test_input_types": selected_test_inputs, -# "execution_time": execution_time, -# } -# ) - -# return unit_test_func - - -# def create_test_summary_func(): -# """ -# Create a function that prints a summary of the test results. -# We do this dynamically so it runs after all the tests have run. -# """ - -# def test_summary(self): -# self.assertTrue( -# True, -# "Test results not found. Did any tests run?", -# ) -# logger.info(">>> Test Summary") -# logger.info( -# ">>> NOTE: Please review failing test cases directly in the output below." -# ) - -# test_summary = [] -# for test in SUCCESSFUL_TESTS: -# test_summary.append( -# [ -# test["test_id"], -# test["test_input_types"], -# "SUCCESS", -# test["execution_time"], -# ] -# ) - -# for test in KNOWN_FAILING_TESTS: -# test_summary.append([test, None, "KNOWN FAILURE", None]) - -# for test in SKIPPED_TESTS: -# test_summary.append([test, None, "SKIPPED", None]) - -# print( -# tabulate( -# test_summary, -# headers=["Test ID", "Type of Test Inputs", "Status", "Execution Time"], -# tablefmt="pretty", -# ) -# ) - -# return test_summary - - -# def create_unit_test_funcs_from_vm_tests(): -# setup_tabular_test_inputs(TEST_INPUTS, TEST_CONFIG) -# setup_summarization_test_inputs(TEST_INPUTS, TEST_CONFIG) -# setup_time_series_test_inputs(TEST_INPUTS, TEST_CONFIG) -# setup_embeddings_test_inputs(TEST_INPUTS, TEST_CONFIG) -# setup_clustering_test_inputs(TEST_INPUTS, TEST_CONFIG) - -# custom_test_ids = os.environ.get("TEST_IDS") -# custom_test_ids = custom_test_ids.split(",") if custom_test_ids else None -# tests_to_run = list_tests(pretty=False) if not custom_test_ids else custom_test_ids - -# # allow filtering tests by wildcard using fnmatch -# # e.g. 
only run tests that start with "validmind.data_validation" -# # TEST_PATTERN="validmind.data_validation*" -# test_pattern = os.environ.get("TEST_PATTERN") -# if test_pattern: -# tests_to_run = [ -# test_id for test_id in tests_to_run if fnmatch(test_id, test_pattern) -# ] - -# for vm_test_id in tqdm(sorted(tests_to_run)): -# # Only skip known failing tests if we're not running a custom set of tests -# if custom_test_ids is None and vm_test_id in KNOWN_FAILING_TESTS: -# logger.debug("Skipping known failing test %s", vm_test_id) -# continue - -# # load the test class -# test_func = load_test(vm_test_id) - -# # create a unit test function for the test class -# unit_test_func = create_unit_test_func(vm_test_id, test_func) -# unit_test_func_name = f'test_{vm_test_id.replace(".", "_")}' - -# # add the unit test function to the unit test class -# setattr(TestRunTest, f"test_{unit_test_func_name}", unit_test_func) - -# # create a test summary function. the zzz is to ensure it runs last -# test_summary_func = create_test_summary_func() -# setattr(TestRunTest, "test_zzz_summary", test_summary_func) - - -# create_unit_test_funcs_from_vm_tests() - - -# if __name__ == "__main__": -# unittest.main()