diff --git a/site/notebooks.zip b/site/notebooks.zip index b944a4b345..96527bc5ef 100644 Binary files a/site/notebooks.zip and b/site/notebooks.zip differ diff --git a/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb b/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb index a42e05c064..f85b592d8a 100644 --- a/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb +++ b/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb @@ -440,15 +440,17 @@ }, { "cell_type": "markdown", - "id": "39c974f0", + "id": "copyright-3e02e70c1d4d4840bf8d9ef44e2cf20c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb b/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb index c4b19ea852..1d62e9c085 100644 --- a/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb +++ b/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb @@ -989,14 +989,17 @@ }, { "cell_type": "markdown", + "id": "copyright-a258273ac8eb41a29a39e52e6f3b4341", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb b/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb index 7966f7886c..49df9a5b50 100644 --- a/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb +++ b/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb @@ -968,14 +968,17 @@ }, { "cell_type": "markdown", + "id": "copyright-2f589b81f04949f6a2c6764859b8bc86", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb b/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb index 8e57366658..0af95d90a7 100644 --- a/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb +++ b/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb @@ -967,14 +967,17 @@ }, { "cell_type": "markdown", + "id": "copyright-6ef1ef2b30eb44c29b99e625c58826ee", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_validation/1-set_up_validmind_for_validation.ipynb b/site/notebooks/EXECUTED/model_validation/1-set_up_validmind_for_validation.ipynb index 807d6caf9e..b9154b80c2 100644 --- a/site/notebooks/EXECUTED/model_validation/1-set_up_validmind_for_validation.ipynb +++ b/site/notebooks/EXECUTED/model_validation/1-set_up_validmind_for_validation.ipynb @@ -491,15 +491,17 @@ }, { "cell_type": "markdown", - "id": "47e90901", + "id": "copyright-5d7a1c159e4840fca79011d1c0380725", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_validation/2-start_validation_process.ipynb b/site/notebooks/EXECUTED/model_validation/2-start_validation_process.ipynb index e6fb19454c..48e4dbff66 100644 --- a/site/notebooks/EXECUTED/model_validation/2-start_validation_process.ipynb +++ b/site/notebooks/EXECUTED/model_validation/2-start_validation_process.ipynb @@ -862,14 +862,17 @@ }, { "cell_type": "markdown", + "id": "copyright-149e9401b9df40d393a2a1958331dea6", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_validation/3-developing_challenger_model.ipynb b/site/notebooks/EXECUTED/model_validation/3-developing_challenger_model.ipynb index 68398c09de..5f7d7e8fd2 100644 --- a/site/notebooks/EXECUTED/model_validation/3-developing_challenger_model.ipynb +++ b/site/notebooks/EXECUTED/model_validation/3-developing_challenger_model.ipynb @@ -865,14 +865,17 @@ }, { "cell_type": "markdown", + "id": "copyright-58757a9cc9de45069a5e6c57c8aaff14", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/EXECUTED/model_validation/4-finalize_validation_reporting.ipynb b/site/notebooks/EXECUTED/model_validation/4-finalize_validation_reporting.ipynb index c5c5f94ea0..fe4e221c90 100644 --- a/site/notebooks/EXECUTED/model_validation/4-finalize_validation_reporting.ipynb +++ b/site/notebooks/EXECUTED/model_validation/4-finalize_validation_reporting.ipynb @@ -1200,14 +1200,17 @@ }, { "cell_type": "markdown", + "id": "copyright-53539c44edf54e4e9bc4d959ffbddd2c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/agents/langgraph_agent_simple_banking_demo.ipynb b/site/notebooks/code_samples/agents/langgraph_agent_simple_banking_demo.ipynb index 6f741793ad..7e80b877ac 100644 --- a/site/notebooks/code_samples/agents/langgraph_agent_simple_banking_demo.ipynb +++ b/site/notebooks/code_samples/agents/langgraph_agent_simple_banking_demo.ipynb @@ -1462,14 +1462,17 @@ }, { "cell_type": "markdown", + "id": "copyright-e7184e5605bb4f85b3d7b8306aaaef78", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb index 47e81ac1c0..6953959242 100644 --- a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb +++ b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb @@ -2061,15 +2061,17 @@ }, { "cell_type": "markdown", - "id": "adb15089", + "id": "copyright-a23adf093a60485ea005cf8fc18545a5", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb index cf6881d21e..1269254add 100644 --- a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb +++ b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb @@ -1291,15 +1291,17 @@ }, { "cell_type": "markdown", - "id": "cab6911c", + "id": "copyright-de5d1e182b09403abddabc2850f2dd05", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb b/site/notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb index c770872b6f..1eb1ef747e 100644 --- a/site/notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb +++ b/site/notebooks/code_samples/code_explainer/quickstart_code_explainer_demo.ipynb @@ -823,14 +823,17 @@ }, { "cell_type": "markdown", + "id": "copyright-ccbede139a26452183291a108b791513", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb index ed848457e5..2115a88b50 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb @@ -343,14 +343,17 @@ }, { "cell_type": "markdown", + "id": "copyright-97f68fa25e694a059b7028ce3ec374cc", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb index 5e3f04529f..83a6d276c8 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb @@ -863,14 +863,17 @@ }, { "cell_type": "markdown", + "id": "copyright-ce253f0d12144a08847d4a65a250a85f", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb index 18ad434543..a915608315 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb @@ -1508,14 +1508,17 @@ }, { "cell_type": "markdown", + "id": "copyright-6ee1a1ce0bd74036a8890be21965bfd2", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb index 6fdf3d523f..357d572b79 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb @@ -1958,14 +1958,17 @@ }, { "cell_type": "markdown", + "id": "copyright-00e4b240625f4af29adb179235912142", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb b/site/notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb index 5602777c25..cc7685c9ca 100644 --- a/site/notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb +++ b/site/notebooks/code_samples/credit_risk/document_excel_application_scorecard.ipynb @@ -967,14 +967,17 @@ }, { "cell_type": "markdown", + "id": "copyright-b04bb3907b774a148a65d78c539ffb4d", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb b/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb index 8988dcd0ae..17385b8e1c 100644 --- a/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb +++ b/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb @@ -1056,14 +1056,17 @@ }, { "cell_type": "markdown", + "id": "copyright-997b933948594ddd929ee9419957dfe3", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb b/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb index b8733b9818..ca1403fb6c 100644 --- a/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb +++ b/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb @@ -956,14 +956,17 @@ }, { "cell_type": "markdown", + "id": "copyright-e9a85f828fdb448ba20c565dec9a0b75", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/model_validation/validate_application_scorecard.ipynb b/site/notebooks/code_samples/model_validation/validate_application_scorecard.ipynb index c10a0bd979..22ab661f78 100644 --- a/site/notebooks/code_samples/model_validation/validate_application_scorecard.ipynb +++ b/site/notebooks/code_samples/model_validation/validate_application_scorecard.ipynb @@ -1837,14 +1837,17 @@ }, { "cell_type": "markdown", + "id": "copyright-3441e79421624bc7819dc2c2957bd93f", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb index 3da6b8e3fb..633c99e0c0 100644 --- a/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb @@ -459,14 +459,17 @@ }, { "cell_type": "markdown", + "id": "copyright-114aa3e23e7e44318b66971760526b6f", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb index 82e910fe58..a7f06ab155 100644 --- a/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb @@ -506,14 +506,17 @@ }, { "cell_type": "markdown", + "id": "copyright-b34e4189be964082bb87b10aa94dcf6a", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb index 825a225d5b..dd7e03d056 100644 --- a/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb @@ -443,14 +443,17 @@ }, { "cell_type": "markdown", + "id": "copyright-8e54f1f3ff334b529685ef57073abb3e", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb index 8ce761d0ea..8209e65c2c 100644 --- a/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb @@ -464,14 +464,17 @@ }, { "cell_type": "markdown", + "id": "copyright-14c6f04dcd164deb8a1db44fde050729", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb index 49f979ceb0..159a16e79d 100644 --- a/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb @@ -981,14 +981,17 @@ }, { "cell_type": "markdown", + "id": "copyright-f38869c3ab0c4de7989f536d06b51773", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb index 6e355c3ca5..49633b3a99 100644 --- a/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb @@ -514,14 +514,17 @@ }, { "cell_type": "markdown", + "id": "copyright-4101a5c77a954664ba8d8a682bee1a1c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb index 14ee7cd4a1..35969b1eb3 100644 --- a/site/notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/rag_benchmark_demo.ipynb @@ -1818,14 +1818,17 @@ }, { "cell_type": "markdown", + "id": "copyright-09e315440ca84258abe1aaefaca3a3d0", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb index 4222911e83..485c81ffdb 100644 --- a/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb @@ -1642,14 +1642,17 @@ }, { "cell_type": "markdown", + "id": "copyright-397fa35a68a34dc38f5d84d797fb5331", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb b/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb index 9763001c0e..1ff6ca3e05 100644 --- a/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb +++ b/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb @@ -1320,14 +1320,17 @@ }, { "cell_type": "markdown", + "id": "copyright-4c5ddb7cde514c958ea0048f5e472de5", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb b/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb index 520058e856..04f64a31ec 100644 --- a/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb +++ b/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb @@ -847,14 +847,17 @@ }, { "cell_type": "markdown", + "id": "copyright-c2ce5d3e3aa04c4ab7f6401ac810c67f", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb b/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb index 8f70b4811d..90992448e1 100644 --- a/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb +++ b/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb @@ -548,14 +548,17 @@ }, { "cell_type": "markdown", + "id": "copyright-ad12b1c3e98d435ea8cc57eadf4f5a76", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb b/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb index fa8cff0ebe..27e5727179 100644 --- a/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb +++ b/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb @@ -710,14 +710,17 @@ }, { "cell_type": "markdown", + "id": "copyright-3e1111bc137c44b9866472934320b128", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb b/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb index bc3a55d0c9..33a2e7220a 100644 --- a/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb +++ b/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb @@ -968,14 +968,17 @@ }, { "cell_type": "markdown", + "id": "copyright-0e30120b73cd445b92ff0637c3467c01", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/assign_scores_complete_tutorial.ipynb b/site/notebooks/how_to/assign_scores_complete_tutorial.ipynb index f8af3393bd..2b96099dff 100644 --- a/site/notebooks/how_to/assign_scores_complete_tutorial.ipynb +++ b/site/notebooks/how_to/assign_scores_complete_tutorial.ipynb @@ -1,810 +1,813 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Intro to Assign Scores" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The `assign_scores()` method is a powerful feature that allows you to compute and add scorer scores as new columns in your dataset. This method takes a model and metric(s) as input, computes the specified metrics from the ValidMind scorer library, and adds them as new columns. The computed metrics provide per-row values, giving you granular insights into model performance at the individual prediction level.\n", - "\n", - "In this interactive notebook, we demonstrate how to use the `assign_scores()` method effectively. We'll walk through a complete example using a customer churn dataset, showing how to compute and assign row-level metrics (like Brier Score and Log Loss) that provide detailed performance insights for each prediction. You'll learn how to work with single and multiple scorers, pass custom parameters, and handle different metric types - all while maintaining a clean, organized dataset structure. Currently, assign_scores() supports all metrics available in the validmind.scorer module.\n", - "\n", - "**The Power of Row-Level Scoring**\n", - "\n", - "Traditional model evaluation workflows often focus on aggregate metrics that provide overall performance summaries. The `assign_scores()` method complements this by providing granular, row-level insights that help you:\n", - "\n", - "- **Identify Problematic Predictions**: Spot individual cases where your model performs poorly\n", - "- **Understand Model Behavior**: Analyze how model performance varies across different types of inputs\n", - "- **Enable Detailed Analysis**: Perform targeted investigations on specific subsets of your data\n", - "- **Support Model Debugging**: Pinpoint exactly where and why your model makes errors\n", - "\n", - "**Understanding assign_scores()**\n", - "\n", - "The `assign_scores()` method computes row metrics for a given model-dataset combination and adds the results as new columns to your dataset. Each new column follows the naming convention: `{model.input_id}_{metric_name}`, ensuring clear identification of which model and metric combination generated each score.\n", - "\n", - "Key features:\n", - "\n", - "- **Row-Level Focus**: Computes per-prediction metrics rather than aggregate scores\n", - "- **Flexible Input**: Accepts single metrics or lists of metrics\n", - "- **Parameter Support**: Allows passing additional parameters to underlying metric implementations\n", - "- **Multi-Model Support**: Can assign scores from multiple models to the same dataset\n", - "- **Type Agnostic**: Works with classification, regression, and other model types\n", - "\n", - "This approach provides detailed insights into your model's performance at the individual prediction level, enabling more sophisticated analysis and debugging workflows." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1__) \n", - " - [Before you begin](#toc1_1__) \n", - " - [New to ValidMind?](#toc1_2__) \n", - "- [Setting up](#toc2__) \n", - " - [Install the ValidMind Library](#toc2_1__) \n", - " - [Initialize the ValidMind Library](#toc2_2__) \n", - " - [Register sample model](#toc2_2_1__) \n", - " - [Apply documentation template](#toc2_2_2__) \n", - " - [Get your code snippet](#toc2_2_3__) \n", - "- [Load the demo dataset](#toc3__) \n", - "- [Train models for testing](#toc4__) \n", - "- [Initialize ValidMind objects](#toc5__) \n", - "- [Assign predictions](#toc6__) \n", - "- [Using assign_scores()](#toc7__) \n", - " - [Basic Usage](#toc7_1__) \n", - " - [Single Scorer Assignment](#toc7_2__) \n", - " - [A Scorer returns complex object](#toc7_3__) \n", - " - [Multiple Scorers Assignment](#toc7_4__) \n", - " - [Passing Parameters to Scorer](#toc7_5__) \n", - " - [Multi-Model scorers](#toc7_6__) \n", - " - [Scorer Metrics](#toc7_7__) \n", - " - [Custom Scorer](#toc7_8__) \n", - "- [Next steps](#toc8__) \n", - " - [Work with your model documentation](#toc8_1__) \n", - " - [Discover more learning resources](#toc8_2__) \n", - "- [Upgrade ValidMind](#toc9__) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Setting up" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Install the ValidMind Library\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Initialize the ValidMind Library" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Register sample model\n", - "\n", - "Let's first register a sample model for use with this notebook:\n", - "\n", - "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", - "\n", - "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", - "\n", - "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", - "\n", - " For example, to register a model for use with this notebook, select the following use case: `Marketing/Sales - Analytics`\n", - "\n", - "4. Select your own name under the **MODEL OWNER** drop-down.\n", - "\n", - "5. Click **Register Model** to add the model to your inventory." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Apply documentation template\n", - "\n", - "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", - "\n", - "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", - "\n", - "2. Under **TEMPLATE**, select `Binary classification`.\n", - "\n", - "3. Click **Use Template** to apply the template." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "#### Get your code snippet\n", - "\n", - "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", - "\n", - "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", - "2. 
Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load your model identifier credentials from an `.env` file\n", - "\n", - "%load_ext dotenv\n", - "%dotenv .env\n", - "\n", - "# Or replace with your code snippet\n", - "\n", - "import validmind as vm\n", - "\n", - "vm.init(\n", - " api_host=\"...\",\n", - " api_key=\"...\",\n", - " api_secret=\"...\",\n", - " model=\"...\",\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Load the demo dataset\n", - "\n", - "In this example, we load a demo dataset to demonstrate the assign_scores functionality with customer churn prediction models." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.datasets.classification import customer_churn as demo_dataset\n", - "\n", - "print(\n", - " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", - ")\n", - "\n", - "raw_df = demo_dataset.load_data()\n", - "raw_df.head()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Train models for testing\n", - "\n", - "We'll train two different customer churn models to demonstrate the assign_scores functionality with multiple models." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import xgboost as xgb\n", - "from sklearn.ensemble import RandomForestClassifier\n", - "\n", - "# Preprocess the data\n", - "train_df, validation_df, test_df = demo_dataset.preprocess(raw_df)\n", - "\n", - "# Prepare training data\n", - "x_train = train_df.drop(demo_dataset.target_column, axis=1)\n", - "y_train = train_df[demo_dataset.target_column]\n", - "x_val = validation_df.drop(demo_dataset.target_column, axis=1)\n", - "y_val = validation_df[demo_dataset.target_column]\n", - "\n", - "# Train XGBoost model\n", - "xgb_model = xgb.XGBClassifier(early_stopping_rounds=10, random_state=42)\n", - "xgb_model.set_params(\n", - " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", - ")\n", - "xgb_model.fit(\n", - " x_train,\n", - " y_train,\n", - " eval_set=[(x_val, y_val)],\n", - " verbose=False,\n", - ")\n", - "\n", - "# Train Random Forest model\n", - "rf_model = RandomForestClassifier(n_estimators=100, random_state=42)\n", - "rf_model.fit(x_train, y_train)\n", - "\n", - "print(\"Models trained successfully!\")\n", - "print(f\"XGBoost training accuracy: {xgb_model.score(x_train, y_train):.3f}\")\n", - "print(f\"Random Forest training accuracy: {rf_model.score(x_train, y_train):.3f}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Initialize ValidMind objects\n", - "\n", - "We initialize ValidMind `dataset` and `model` objects. The `input_id` parameter is crucial for the assign_scores functionality as it determines the column naming convention for assigned scores." 
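To make that naming convention concrete before any scores are assigned, here is a quick sketch; the `input_id` values mirror the ones initialized in the next cell, and `BrierScore` is one scorer's short name:

```python
# assign_scores() adds one column per model/metric pair, named
# "{model.input_id}_{metric_name}". With the input_ids used below:
for input_id in ("xgboost_model", "random_forest_model"):
    print(f"{input_id}_BrierScore")
# -> xgboost_model_BrierScore
# -> random_forest_model_BrierScore
```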
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize datasets\n", - "vm_train_ds = vm.init_dataset(\n", - " input_id=\"train_dataset\",\n", - " dataset=train_df,\n", - " target_column=demo_dataset.target_column,\n", - ")\n", - "vm_test_ds = vm.init_dataset(\n", - " input_id=\"test_dataset\",\n", - " dataset=test_df,\n", - " target_column=demo_dataset.target_column,\n", - ")\n", - "\n", - "# Initialize models with descriptive input_ids\n", - "vm_xgb_model = vm.init_model(model=xgb_model, input_id=\"xgboost_model\")\n", - "vm_rf_model = vm.init_model(model=rf_model, input_id=\"random_forest_model\")\n", - "\n", - "print(\"ValidMind objects initialized successfully!\")\n", - "print(f\"XGBoost model ID: {vm_xgb_model.input_id}\")\n", - "print(f\"Random Forest model ID: {vm_rf_model.input_id}\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Assign predictions\n", - "\n", - "Before we can use assign_scores(), we need to assign predictions to our datasets. This step is essential as many unit metrics require both actual and predicted values." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign predictions for both models to both datasets\n", - "vm_train_ds.assign_predictions(model=vm_xgb_model)\n", - "vm_train_ds.assign_predictions(model=vm_rf_model)\n", - "\n", - "vm_test_ds.assign_predictions(model=vm_xgb_model)\n", - "vm_test_ds.assign_predictions(model=vm_rf_model)\n", - "\n", - "print(\"Predictions assigned successfully!\")\n", - "print(f\"Test dataset now has {len(vm_test_ds.df.columns)} columns\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Using assign_scores()\n", - "\n", - "Now we'll explore the various ways to use the assign_scores() method to integrate performance metrics directly into your dataset." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Basic Usage\n", - "\n", - "The assign_scores() method has a simple interface:\n", - "\n", - "```python\n", - "dataset.assign_scores(model, metrics, **kwargs)\n", - "```\n", - "\n", - "- **model**: A ValidMind model object\n", - "- **metrics**: Single metric ID or list of metric IDs (can use short names or full IDs)\n", - "- **kwargs**: Additional parameters passed to the underlying metric implementations\n", - "\n", - "Let's first check what columns we currently have in our test dataset:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Current columns in test dataset:\")\n", - "for i, col in enumerate(vm_test_ds.df.columns, 1):\n", - " print(f\"{i:2d}. {col}\")\n", - "\n", - "print(f\"\\nDataset shape: {vm_test_ds.df.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Single Scorer Assignment\n", - " \n", - "Let's start by assigning a single Scorer - the Brier Score - for our XGBoost model on the test dataset." 
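The executed cell below performs exactly this assignment. As a sketch of what to expect, the call should add a single new column, which you can verify and then use to surface the weakest predictions; the column name follows the convention above, and `vm_test_ds` / `vm_xgb_model` are the objects initialized earlier:

```python
# Snapshot the columns, run the assignment, and diff to see what was added.
before = set(vm_test_ds.df.columns)
vm_test_ds.assign_scores(
    metrics="validmind.scorer.classification.BrierScore", model=vm_xgb_model
)
print(set(vm_test_ds.df.columns) - before)  # expected: {"xgboost_model_BrierScore"}

# Row-level scores make problem cases easy to isolate: the highest
# Brier Scores are the worst-calibrated individual predictions.
print(vm_test_ds.df.nlargest(10, "xgboost_model_BrierScore"))
```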
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign Brier Score for XGBoost model\n", - "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.BrierScore\", model = vm_xgb_model)\n", - "\n", - "print(\"After assigning Brier Score:\")\n", - "print(f\"New column added: {vm_test_ds.df.columns}\")\n", - "# Display the metric values\n", - "vm_test_ds.df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### A Scorer returns complex object\n", - " The OutlierScore scorer demonstrates how scorers can return complex objects. It returns a dictionary containing per-row outlier detection results. For each row, it includes:\n", - " - is_outlier: Boolean indicating if the row is an outlier\n", - " - anomaly_score: Numerical score indicating degree of outlierness\n", - " - isolation_path: Length of isolation path in the tree\n", - "\n", - "When assigned to a dataset, these dictionary values are automatically unpacked into separate columns with appropriate prefixes." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign Brier Score for XGBoost model\n", - "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.OutlierScore\", model = vm_xgb_model)\n", - "\n", - "print(\"After assigning Score With Confidence:\")\n", - "print(f\"New column added: {vm_test_ds.df.columns}\")\n", - "# Display the metric values\n", - "vm_test_ds.df.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign Brier Score for XGBoost model\n", - "vm_test_ds.assign_scores(\"validmind.scorer.classification.OutlierScore\")\n", - "\n", - "print(\"After assigning Score With Confidence:\")\n", - "print(f\"New column added: {vm_test_ds.df.columns}\")\n", - "# Display the metric values\n", - "vm_test_ds.df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Multiple Scorers Assignment\n", - "\n", - "We can assign multiple metrics at once by passing a list of Scorer names. This is more efficient than calling assign_scores() multiple times." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign multiple classification metrics for the Random Forest model\n", - "scorer = [\n", - " \"validmind.scorer.classification.BrierScore\",\n", - " \"validmind.scorer.classification.LogLoss\",\n", - " \"validmind.scorer.classification.Confidence\"\n", - "]\n", - "\n", - "vm_test_ds.assign_scores(metrics = scorer, model = vm_rf_model)\n", - "\n", - "print(\"After assigning multiple row metrics for Random Forest:\")\n", - "rf_columns = [col for col in vm_test_ds.df.columns if 'random_forest_model' in col]\n", - "print(f\"Random Forest columns: {rf_columns}\")\n", - "\n", - "# Display the metric values\n", - "vm_test_ds.df[rf_columns].head()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Passing Parameters to Scorer\n", - "\n", - "Many row metrics accept additional parameters that are passed through to the underlying implementations. Let's demonstrate this with the LogLoss metric." 
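Schematically, anything passed beyond `metrics` and `model` is forwarded to the scorer, so the parameter names are scorer-specific; `eps` below belongs to the LogLoss scorer (presumably a numerical-stability clip on predicted probabilities) and mirrors the executed cell that follows:

```python
# General form (a sketch): extra keyword arguments pass through to the
# underlying scorer implementation.
#   dataset.assign_scores(metrics=<scorer id or list>, model=<vm model>, **scorer_kwargs)

# Concretely, forwarding eps to the LogLoss scorer:
vm_test_ds.assign_scores(
    metrics="validmind.scorer.classification.LogLoss",
    model=vm_xgb_model,
    eps=1e-16,
)
```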
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Assign LogLoss\n", - "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.LogLoss\", model = vm_xgb_model, eps = 1e-16)\n", - "\n", - "# We can also assign with different parameters by calling assign_scores again\n", - "# Note: This will overwrite the previous column with the same name\n", - "print(\"LogLoss assigned successfully\")\n", - "\n", - "# Let's also assign BrierScore and Confidence\n", - "vm_test_ds.assign_scores(metrics = [\"validmind.scorer.classification.BrierScore\",\"validmind.scorer.classification.Confidence\"], model = vm_xgb_model)\n", - "\n", - "print(\"BrierScore and Confidence assigned successfully\")\n", - "\n", - "# Display current XGBoost metric columns\n", - "xgb_columns = [col for col in vm_test_ds.df.columns if 'xgboost_model' in col]\n", - "print(f\"\\nXGBoost model columns: {xgb_columns}\")\n", - "\n", - "vm_test_ds.df[xgb_columns].head()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Multi-Model scorers\n", - "\n", - "One of the powerful features of assign_scores() is the ability to assign scores from multiple models to the same dataset, enabling detailed model comparison at the prediction level." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Let's assign a comprehensive set of metrics for both models\n", - "comprehensive_metrics = [\n", - " \"validmind.scorer.classification.BrierScore\",\n", - " \"validmind.scorer.classification.LogLoss\",\n", - " \"validmind.scorer.classification.Confidence\",\n", - " \"validmind.scorer.classification.Correctness\"\n", - "]\n", - "\n", - "# Assign for XGBoost model\n", - "vm_test_ds.assign_scores(metrics = comprehensive_metrics, model = vm_xgb_model)\n", - "\n", - "# Assign for Random Forest model}\n", - "vm_test_ds.assign_scores(metrics = comprehensive_metrics, model = vm_rf_model)\n", - "\n", - "print(\"Row-level metrics assigned for both models!\")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Scorer Metrics\n", - "The next section demonstrates how to assign individual metrics that compute scores per row, rather than aggregate metrics.\n", - "We'll use several important row metrics:\n", - " \n", - "- Brier Score: Measures how well calibrated the model's probability predictions are for each individual prediction\n", - "- Log Loss: Evaluates how well the predicted probabilities match the true labels on a per-prediction basis\n", - "- Confidence: Measures the model's confidence in its predictions for each row\n", - "- Correctness: Indicates whether each prediction is correct (1) or incorrect (0)\n", - "\n", - "All these metrics provide granular insights into model performance at the individual prediction level." 
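Because both models' per-row scores now sit on the same rows, cross-model comparison is straightforward. A sketch, assuming the comprehensive assignment above has run; the column spellings follow the `{input_id}_{metric_name}` convention:

```python
df = vm_test_ds.df

# Rows where the two models disagree on correctness (Correctness is 1 for
# a correct prediction, 0 otherwise):
only_xgb_right = df[
    (df["xgboost_model_Correctness"] == 1)
    & (df["random_forest_model_Correctness"] == 0)
]
print(f"Rows only XGBoost gets right: {len(only_xgb_right)}")

# Mean per-row Brier Score as a quick calibration comparison:
print(df[["xgboost_model_BrierScore", "random_forest_model_BrierScore"]].mean())
```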
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Let's add some individual metrics that compute per-row scores\n", - "print(\"Adding individual metrics...\")\n", - "\n", - "# Add Brier Score - measures accuracy of probabilistic predictions per row\n", - "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.BrierScore\", model = vm_xgb_model)\n", - "print(\"Added Brier Score - lower values indicate better calibrated probabilities\")\n", - "\n", - "# Add Log Loss - measures how well the predicted probabilities match true labels per row\n", - "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.LogLoss\", model = vm_xgb_model)\n", - "print(\"Added Log Loss - lower values indicate better probability estimates\")\n", - "\n", - "# Create a comparison summary showing first few rows of individual metrics\n", - "print(\"\\nFirst few rows of individual metrics:\")\n", - "individual_metrics = [col for col in vm_test_ds.df.columns if any(m in col for m in ['BrierScore', 'LogLoss', 'Confidence', 'Correctness'])]\n", - "print(vm_test_ds.df[individual_metrics].head())\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vm_test_ds._df.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "### Custom Scorer\n", - "Let's see how to create your own custom scorers using the `@scorer` decorator.\n", - " \n", - "The example below demonstrates a scorer that looks at the class balance in the neighborhood around each data point. For each row, it will give you a score from 0 to 1, where a score closer to 1 means there's a nice even balance of classes in that area of your data. This can help you identify regions where your classes are well-mixed vs regions dominated by a single class." 
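The scorer defined below computes `balance = 1 - |0.5 - class_ratio| * 2` over a sliding window; a quick worked check confirms the 0-to-1 range behaves as described:

```python
# A 50/50 window scores 1.0, a 70/30 window 0.6, a single-class window 0.0.
for class_ratio in (0.5, 0.7, 1.0):
    print(class_ratio, round(1 - abs(0.5 - class_ratio) * 2, 2))
```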
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.scorer import scorer\n", - "import numpy as np\n", - "\n", - "@scorer(\"my_scorers.TestScorer\") \n", - "def test_scorer(model, dataset):\n", - " \"\"\"Custom scorer that calculates class balance ratio.\n", - " \n", - " Args:\n", - " model: Not used in this scorer\n", - " dataset: The dataset to analyze\n", - " \n", - " Returns:\n", - " numpy.ndarray: Array of class balance ratios between 0 and 1,\n", - " where values closer to 1 indicate better class balance in the local neighborhood\n", - " \"\"\"\n", - " # Get target values\n", - " y = dataset.df[dataset.target_column].values\n", - " \n", - " # Calculate local class balance in sliding windows\n", - " window_size = 100\n", - " balance_scores = []\n", - " \n", - " for i in range(len(y)):\n", - " start_idx = max(0, i - window_size//2)\n", - " end_idx = min(len(y), i + window_size//2)\n", - " window = y[start_idx:end_idx]\n", - " \n", - " # Calculate ratio of minority class\n", - " class_ratio = np.mean(window)\n", - " # Adjust to be symmetric around 0.5\n", - " balance_score = 1 - abs(0.5 - class_ratio) * 2\n", - " \n", - " balance_scores.append(balance_score)\n", - " \n", - " return np.array(balance_scores)\n", - "\n", - "# Assign the class balance scores to the dataset\n", - "vm_test_ds.assign_scores(metrics = \"my_scorers.TestScorer\", model = vm_xgb_model)\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Next steps\n", - "\n", - "You can explore the assigned scores right in the notebook as demonstrated above. However, there's even more value in using the ValidMind Platform to work with your model documentation and monitoring.\n", - "\n", - "\n", - "\n", - "### Work with your model documentation\n", - "\n", - "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n", - "\n", - "2. Click and expand the **Model Development** section.\n", - "\n", - "The scores you've assigned using `assign_scores()` become part of your model's documentation and can be used in ongoing monitoring workflows. You can view these metrics over time, set up alerts for performance drift, and compare models systematically. [Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n", - "\n", - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "We offer many interactive notebooks to help you work with model scoring and evaluation:\n", - "\n", - "- [Run unit metrics](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", - "- [Assign predictions](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", - "- [Model comparison workflows](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", - "\n", - "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Upgrade ValidMind\n", - "\n", - "
After installing ValidMind, you'll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", - "\n", - "Retrieve the information for the currently installed version of ValidMind:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "%pip show validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", - "\n", - "```bash\n", - "%pip install --upgrade validmind\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You may need to restart your kernel after running the upgrade package for changes to be applied." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "ValidMind Library", - "language": "python", - "name": "validmind" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Intro to Assign Scores" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `assign_scores()` method is a powerful feature that allows you to compute and add scorer scores as new columns in your dataset. This method takes a model and metric(s) as input, computes the specified metrics from the ValidMind scorer library, and adds them as new columns. The computed metrics provide per-row values, giving you granular insights into model performance at the individual prediction level.\n", + "\n", + "In this interactive notebook, we demonstrate how to use the `assign_scores()` method effectively. We'll walk through a complete example using a customer churn dataset, showing how to compute and assign row-level metrics (like Brier Score and Log Loss) that provide detailed performance insights for each prediction. You'll learn how to work with single and multiple scorers, pass custom parameters, and handle different metric types - all while maintaining a clean, organized dataset structure. Currently, assign_scores() supports all metrics available in the validmind.scorer module.\n", + "\n", + "**The Power of Row-Level Scoring**\n", + "\n", + "Traditional model evaluation workflows often focus on aggregate metrics that provide overall performance summaries. The `assign_scores()` method complements this by providing granular, row-level insights that help you:\n", + "\n", + "- **Identify Problematic Predictions**: Spot individual cases where your model performs poorly\n", + "- **Understand Model Behavior**: Analyze how model performance varies across different types of inputs\n", + "- **Enable Detailed Analysis**: Perform targeted investigations on specific subsets of your data\n", + "- **Support Model Debugging**: Pinpoint exactly where and why your model makes errors\n", + "\n", + "**Understanding assign_scores()**\n", + "\n", + "The `assign_scores()` method computes row metrics for a given model-dataset combination and adds the results as new columns to your dataset. Each new column follows the naming convention: `{model.input_id}_{metric_name}`, ensuring clear identification of which model and metric combination generated each score.\n", + "\n", + "Key features:\n", + "\n", + "- **Row-Level Focus**: Computes per-prediction metrics rather than aggregate scores\n", + "- **Flexible Input**: Accepts single metrics or lists of metrics\n", + "- **Parameter Support**: Allows passing additional parameters to underlying metric implementations\n", + "- **Multi-Model Support**: Can assign scores from multiple models to the same dataset\n", + "- **Type Agnostic**: Works with classification, regression, and other model types\n", + "\n", + "This approach provides detailed insights into your model's performance at the individual prediction level, enabling more sophisticated analysis and debugging workflows." 
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "::: {.content-hidden when-format=\"html\"}\n",
+     "## Contents  \n",
+     "- [About ValidMind](#toc1__)  \n",
+     "  - [Before you begin](#toc1_1__)  \n",
+     "  - [New to ValidMind?](#toc1_2__)  \n",
+     "- [Setting up](#toc2__)  \n",
+     "  - [Install the ValidMind Library](#toc2_1__)  \n",
+     "  - [Initialize the ValidMind Library](#toc2_2__)  \n",
+     "    - [Register sample model](#toc2_2_1__)  \n",
+     "    - [Apply documentation template](#toc2_2_2__)  \n",
+     "    - [Get your code snippet](#toc2_2_3__)  \n",
+     "- [Load the demo dataset](#toc3__)  \n",
+     "- [Train models for testing](#toc4__)  \n",
+     "- [Initialize ValidMind objects](#toc5__)  \n",
+     "- [Assign predictions](#toc6__)  \n",
+     "- [Using assign_scores()](#toc7__)  \n",
+     "  - [Basic Usage](#toc7_1__)  \n",
+     "  - [Single Scorer Assignment](#toc7_2__)  \n",
+     "  - [A Scorer that returns a complex object](#toc7_3__)  \n",
+     "  - [Multiple Scorers Assignment](#toc7_4__)  \n",
+     "  - [Passing Parameters to Scorer](#toc7_5__)  \n",
+     "  - [Multi-Model scorers](#toc7_6__)  \n",
+     "  - [Scorer Metrics](#toc7_7__)  \n",
+     "  - [Custom Scorer](#toc7_8__)  \n",
+     "- [Next steps](#toc8__)  \n",
+     "  - [Work with your model documentation](#toc8_1__)  \n",
+     "  - [Discover more learning resources](#toc8_2__)  \n",
+     "- [Upgrade ValidMind](#toc9__)  \n",
+     "\n",
+     ":::\n",
+     "\n",
+     ""
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "## About ValidMind\n",
+     "\n",
+     "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n",
+     "\n",
+     "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n",
+     "\n",
+     "\n",
+     "\n",
+     "### Before you begin\n",
+     "\n",
+     "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language.\n",
+     "\n",
+     "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n",
+     "\n",
+     "\n",
+     "\n",
+     "### New to ValidMind?\n",
+     "\n",
+     "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n",
+     "\n",
+     "<div class=\"alert alert-block alert-info\" style=\"background-color: #B5B5B510; color: black; border: 1px solid #083E44; border-left-width: 5px; box-shadow: 2px 2px 4px rgba(0, 0, 0, 0.2);border-radius: 5px;\">
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", + "

\n", + "Register with ValidMind
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Register sample model\n", + "\n", + "Let's first register a sample model for use with this notebook:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Next >** to continue to assignment of model stakeholders. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + " For example, to register a model for use with this notebook, select the following use case: `Marketing/Sales - Analytics`\n", + "\n", + "4. Select your own name under the **MODEL OWNER** drop-down.\n", + "\n", + "5. Click **Register Model** to add the model to your inventory." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Apply documentation template\n", + "\n", + "Once you've registered your model, let's select a documentation template. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "1. In the left sidebar that appears for your model, click **Documents** and select **Documentation**.\n", + "\n", + "2. Under **TEMPLATE**, select `Binary classification`.\n", + "\n", + "3. Click **Use Template** to apply the template." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook.\n", + "\n", + "1. On the left sidebar that appears for your model, select **Getting Started** and click **Copy snippet to clipboard**.\n", + "2. 
Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " api_host=\"...\",\n", + " api_key=\"...\",\n", + " api_secret=\"...\",\n", + " model=\"...\",\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Load the demo dataset\n", + "\n", + "In this example, we load a demo dataset to demonstrate the assign_scores functionality with customer churn prediction models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()\n", + "raw_df.head()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Train models for testing\n", + "\n", + "We'll train two different customer churn models to demonstrate the assign_scores functionality with multiple models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import xgboost as xgb\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "\n", + "# Preprocess the data\n", + "train_df, validation_df, test_df = demo_dataset.preprocess(raw_df)\n", + "\n", + "# Prepare training data\n", + "x_train = train_df.drop(demo_dataset.target_column, axis=1)\n", + "y_train = train_df[demo_dataset.target_column]\n", + "x_val = validation_df.drop(demo_dataset.target_column, axis=1)\n", + "y_val = validation_df[demo_dataset.target_column]\n", + "\n", + "# Train XGBoost model\n", + "xgb_model = xgb.XGBClassifier(early_stopping_rounds=10, random_state=42)\n", + "xgb_model.set_params(\n", + " eval_metric=[\"error\", \"logloss\", \"auc\"],\n", + ")\n", + "xgb_model.fit(\n", + " x_train,\n", + " y_train,\n", + " eval_set=[(x_val, y_val)],\n", + " verbose=False,\n", + ")\n", + "\n", + "# Train Random Forest model\n", + "rf_model = RandomForestClassifier(n_estimators=100, random_state=42)\n", + "rf_model.fit(x_train, y_train)\n", + "\n", + "print(\"Models trained successfully!\")\n", + "print(f\"XGBoost training accuracy: {xgb_model.score(x_train, y_train):.3f}\")\n", + "print(f\"Random Forest training accuracy: {rf_model.score(x_train, y_train):.3f}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Initialize ValidMind objects\n", + "\n", + "We initialize ValidMind `dataset` and `model` objects. The `input_id` parameter is crucial for the assign_scores functionality as it determines the column naming convention for assigned scores." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize datasets\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset\",\n", + " dataset=train_df,\n", + " target_column=demo_dataset.target_column,\n", + ")\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset\",\n", + " dataset=test_df,\n", + " target_column=demo_dataset.target_column,\n", + ")\n", + "\n", + "# Initialize models with descriptive input_ids\n", + "vm_xgb_model = vm.init_model(model=xgb_model, input_id=\"xgboost_model\")\n", + "vm_rf_model = vm.init_model(model=rf_model, input_id=\"random_forest_model\")\n", + "\n", + "print(\"ValidMind objects initialized successfully!\")\n", + "print(f\"XGBoost model ID: {vm_xgb_model.input_id}\")\n", + "print(f\"Random Forest model ID: {vm_rf_model.input_id}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Assign predictions\n", + "\n", + "Before we can use assign_scores(), we need to assign predictions to our datasets. This step is essential as many unit metrics require both actual and predicted values." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Assign predictions for both models to both datasets\n", + "vm_train_ds.assign_predictions(model=vm_xgb_model)\n", + "vm_train_ds.assign_predictions(model=vm_rf_model)\n", + "\n", + "vm_test_ds.assign_predictions(model=vm_xgb_model)\n", + "vm_test_ds.assign_predictions(model=vm_rf_model)\n", + "\n", + "print(\"Predictions assigned successfully!\")\n", + "print(f\"Test dataset now has {len(vm_test_ds.df.columns)} columns\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Using assign_scores()\n", + "\n", + "Now we'll explore the various ways to use the assign_scores() method to integrate performance metrics directly into your dataset." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Basic Usage\n", + "\n", + "The assign_scores() method has a simple interface:\n", + "\n", + "```python\n", + "dataset.assign_scores(model, metrics, **kwargs)\n", + "```\n", + "\n", + "- **model**: A ValidMind model object\n", + "- **metrics**: Single metric ID or list of metric IDs (can use short names or full IDs)\n", + "- **kwargs**: Additional parameters passed to the underlying metric implementations\n", + "\n", + "Let's first check what columns we currently have in our test dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Current columns in test dataset:\")\n", + "for i, col in enumerate(vm_test_ds.df.columns, 1):\n", + " print(f\"{i:2d}. {col}\")\n", + "\n", + "print(f\"\\nDataset shape: {vm_test_ds.df.shape}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Single Scorer Assignment\n", + " \n", + "Let's start by assigning a single Scorer - the Brier Score - for our XGBoost model on the test dataset." 
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Assign Brier Score for XGBoost model\n",
+     "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.BrierScore\", model = vm_xgb_model)\n",
+     "\n",
+     "print(\"After assigning Brier Score:\")\n",
+     "print(f\"New column added: {[col for col in vm_test_ds.df.columns if 'BrierScore' in col]}\")\n",
+     "# Display the metric values\n",
+     "vm_test_ds.df.head()"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### A Scorer that returns a complex object\n",
+     "\n",
+     "The OutlierScore scorer demonstrates how scorers can return complex objects. It returns a dictionary containing per-row outlier detection results. For each row, it includes:\n",
+     "\n",
+     "- is_outlier: Boolean indicating whether the row is an outlier\n",
+     "- anomaly_score: Numerical score indicating the degree of outlierness\n",
+     "- isolation_path: Length of the isolation path in the tree\n",
+     "\n",
+     "When assigned to a dataset, these dictionary values are automatically unpacked into separate columns with appropriate prefixes."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Assign OutlierScore for XGBoost model\n",
+     "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.OutlierScore\", model = vm_xgb_model)\n",
+     "\n",
+     "print(\"After assigning OutlierScore:\")\n",
+     "print(f\"New columns added: {[col for col in vm_test_ds.df.columns if 'OutlierScore' in col]}\")\n",
+     "# Display the metric values\n",
+     "vm_test_ds.df.head()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# OutlierScore can also be assigned without specifying a model\n",
+     "vm_test_ds.assign_scores(\"validmind.scorer.classification.OutlierScore\")\n",
+     "\n",
+     "print(\"After assigning OutlierScore without a model:\")\n",
+     "print(f\"New columns added: {[col for col in vm_test_ds.df.columns if 'OutlierScore' in col]}\")\n",
+     "# Display the metric values\n",
+     "vm_test_ds.df.head()"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### Multiple Scorers Assignment\n",
+     "\n",
+     "We can assign multiple metrics at once by passing a list of Scorer names. This is more efficient than calling assign_scores() multiple times."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Assign multiple classification metrics for the Random Forest model\n",
+     "scorers = [\n",
+     "    \"validmind.scorer.classification.BrierScore\",\n",
+     "    \"validmind.scorer.classification.LogLoss\",\n",
+     "    \"validmind.scorer.classification.Confidence\"\n",
+     "]\n",
+     "\n",
+     "vm_test_ds.assign_scores(metrics = scorers, model = vm_rf_model)\n",
+     "\n",
+     "print(\"After assigning multiple row metrics for Random Forest:\")\n",
+     "rf_columns = [col for col in vm_test_ds.df.columns if 'random_forest_model' in col]\n",
+     "print(f\"Random Forest columns: {rf_columns}\")\n",
+     "\n",
+     "# Display the metric values\n",
+     "vm_test_ds.df[rf_columns].head()\n"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### Passing Parameters to Scorer\n",
+     "\n",
+     "Many row metrics accept additional parameters that are passed through to the underlying implementations. Let's demonstrate this with the LogLoss metric."
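+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "As a sketch of the call shape (the `eps` keyword is the example parameter demonstrated for real in the next cell), any extra keyword arguments are simply forwarded to the scorer implementation:\n",
+     "\n",
+     "```python\n",
+     "dataset.assign_scores(\n",
+     "    model=vm_model,                                      # which model's predictions to score\n",
+     "    metrics=\"validmind.scorer.classification.LogLoss\",  # scorer ID\n",
+     "    eps=1e-16,                                           # forwarded to the LogLoss implementation\n",
+     ")\n",
+     "```"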
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Assign LogLoss, forwarding the eps parameter to the underlying implementation\n",
+     "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.LogLoss\", model = vm_xgb_model, eps = 1e-16)\n",
+     "\n",
+     "# We can also assign with different parameters by calling assign_scores again\n",
+     "# Note: This will overwrite the previous column with the same name\n",
+     "print(\"LogLoss assigned successfully\")\n",
+     "\n",
+     "# Let's also assign BrierScore and Confidence\n",
+     "vm_test_ds.assign_scores(metrics = [\"validmind.scorer.classification.BrierScore\", \"validmind.scorer.classification.Confidence\"], model = vm_xgb_model)\n",
+     "\n",
+     "print(\"BrierScore and Confidence assigned successfully\")\n",
+     "\n",
+     "# Display current XGBoost metric columns\n",
+     "xgb_columns = [col for col in vm_test_ds.df.columns if 'xgboost_model' in col]\n",
+     "print(f\"\\nXGBoost model columns: {xgb_columns}\")\n",
+     "\n",
+     "vm_test_ds.df[xgb_columns].head()\n"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### Multi-Model scorers\n",
+     "\n",
+     "One of the powerful features of assign_scores() is the ability to assign scores from multiple models to the same dataset, enabling detailed model comparison at the prediction level."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Let's assign a comprehensive set of metrics for both models\n",
+     "comprehensive_metrics = [\n",
+     "    \"validmind.scorer.classification.BrierScore\",\n",
+     "    \"validmind.scorer.classification.LogLoss\",\n",
+     "    \"validmind.scorer.classification.Confidence\",\n",
+     "    \"validmind.scorer.classification.Correctness\"\n",
+     "]\n",
+     "\n",
+     "# Assign for XGBoost model\n",
+     "vm_test_ds.assign_scores(metrics = comprehensive_metrics, model = vm_xgb_model)\n",
+     "\n",
+     "# Assign for Random Forest model\n",
+     "vm_test_ds.assign_scores(metrics = comprehensive_metrics, model = vm_rf_model)\n",
+     "\n",
+     "print(\"Row-level metrics assigned for both models!\")\n"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### Scorer Metrics\n",
+     "\n",
+     "The next section demonstrates how to assign individual metrics that compute scores per row, rather than aggregate metrics. We'll use several important row metrics:\n",
+     "\n",
+     "- Brier Score: Measures how well calibrated the model's probability predictions are for each individual prediction\n",
+     "- Log Loss: Evaluates how well the predicted probabilities match the true labels on a per-prediction basis\n",
+     "- Confidence: Measures the model's confidence in its predictions for each row\n",
+     "- Correctness: Indicates whether each prediction is correct (1) or incorrect (0)\n",
+     "\n",
+     "All these metrics provide granular insights into model performance at the individual prediction level."
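+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Before running the demonstration below, note that scores already assigned support ordinary pandas slicing for targeted error analysis. A small sketch (assuming the `xgboost_model_Correctness` and `xgboost_model_Confidence` columns assigned in the previous section):\n",
+     "\n",
+     "```python\n",
+     "# Rows the XGBoost model got wrong, ranked by how confident it was\n",
+     "errors = vm_test_ds.df[vm_test_ds.df[\"xgboost_model_Correctness\"] == 0]\n",
+     "errors.sort_values(\"xgboost_model_Confidence\", ascending=False).head()\n",
+     "```"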
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Let's add some individual metrics that compute per-row scores\n",
+     "print(\"Adding individual metrics...\")\n",
+     "\n",
+     "# Add Brier Score - measures accuracy of probabilistic predictions per row\n",
+     "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.BrierScore\", model = vm_xgb_model)\n",
+     "print(\"Added Brier Score - lower values indicate better calibrated probabilities\")\n",
+     "\n",
+     "# Add Log Loss - measures how well the predicted probabilities match true labels per row\n",
+     "vm_test_ds.assign_scores(metrics = \"validmind.scorer.classification.LogLoss\", model = vm_xgb_model)\n",
+     "print(\"Added Log Loss - lower values indicate better probability estimates\")\n",
+     "\n",
+     "# Create a comparison summary showing the first few rows of individual metrics\n",
+     "print(\"\\nFirst few rows of individual metrics:\")\n",
+     "individual_metrics = [col for col in vm_test_ds.df.columns if any(m in col for m in ['BrierScore', 'LogLoss', 'Confidence', 'Correctness'])]\n",
+     "print(vm_test_ds.df[individual_metrics].head())\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Inspect the full dataset, including all assigned score columns\n",
+     "vm_test_ds.df.head()"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "### Custom Scorer\n",
+     "\n",
+     "Let's see how to create your own custom scorers using the `@scorer` decorator.\n",
+     "\n",
+     "The example below demonstrates a scorer that looks at the class balance in the neighborhood around each data point. For each row, it returns a score from 0 to 1, where a score closer to 1 means there is an even balance of classes in that area of your data. This can help you identify regions where your classes are well-mixed versus regions dominated by a single class."
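+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "At its core, a custom scorer is just a decorated function that takes a model and a dataset and returns one value per row. A minimal sketch before the full example (`my_scorers.AbsError` is a hypothetical ID, and the `dataset.y` / `dataset.y_pred()` accessors are assumed from the ValidMind dataset interface):\n",
+     "\n",
+     "```python\n",
+     "from validmind.scorer import scorer\n",
+     "import numpy as np\n",
+     "\n",
+     "@scorer(\"my_scorers.AbsError\")\n",
+     "def abs_error(model, dataset):\n",
+     "    # One score per row: absolute difference between label and prediction\n",
+     "    return np.abs(dataset.y - dataset.y_pred(model))\n",
+     "```"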
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from validmind.scorer import scorer\n",
+     "import numpy as np\n",
+     "\n",
+     "@scorer(\"my_scorers.TestScorer\")\n",
+     "def test_scorer(model, dataset):\n",
+     "    \"\"\"Custom scorer that calculates class balance ratio.\n",
+     "\n",
+     "    Args:\n",
+     "        model: Not used in this scorer\n",
+     "        dataset: The dataset to analyze\n",
+     "\n",
+     "    Returns:\n",
+     "        numpy.ndarray: Array of class balance ratios between 0 and 1,\n",
+     "        where values closer to 1 indicate better class balance in the local neighborhood\n",
+     "    \"\"\"\n",
+     "    # Get target values\n",
+     "    y = dataset.df[dataset.target_column].values\n",
+     "\n",
+     "    # Calculate local class balance in sliding windows\n",
+     "    window_size = 100\n",
+     "    balance_scores = []\n",
+     "\n",
+     "    for i in range(len(y)):\n",
+     "        start_idx = max(0, i - window_size//2)\n",
+     "        end_idx = min(len(y), i + window_size//2)\n",
+     "        window = y[start_idx:end_idx]\n",
+     "\n",
+     "        # Calculate the ratio of the positive class in the window\n",
+     "        class_ratio = np.mean(window)\n",
+     "        # Adjust to be symmetric around 0.5 (1 = perfectly balanced, 0 = single class)\n",
+     "        balance_score = 1 - abs(0.5 - class_ratio) * 2\n",
+     "\n",
+     "        balance_scores.append(balance_score)\n",
+     "\n",
+     "    return np.array(balance_scores)\n",
+     "\n",
+     "# Assign the class balance scores to the dataset\n",
+     "vm_test_ds.assign_scores(metrics = \"my_scorers.TestScorer\", model = vm_xgb_model)"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "## Next steps\n",
+     "\n",
+     "You can explore the assigned scores right in the notebook as demonstrated above. However, there's even more value in using the ValidMind Platform to work with your model documentation and monitoring.\n",
+     "\n",
+     "\n",
+     "\n",
+     "### Work with your model documentation\n",
+     "\n",
+     "1. From the **Model Inventory** in the ValidMind Platform, go to the model you registered earlier. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/working-with-model-inventory.html))\n",
+     "\n",
+     "2. Click and expand the **Model Development** section.\n",
+     "\n",
+     "The scores you've assigned using `assign_scores()` become part of your model's documentation and can be used in ongoing monitoring workflows. You can view these metrics over time, set up alerts for performance drift, and compare models systematically. [Learn more ...](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)\n",
+     "\n",
+     "\n",
+     "\n",
+     "### Discover more learning resources\n",
+     "\n",
+     "We offer many interactive notebooks to help you work with model scoring and evaluation:\n",
+     "\n",
+     "- [Run unit metrics](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n",
+     "- [Assign predictions](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n",
+     "- [Model comparison workflows](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n",
+     "\n",
+     "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind."
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "\n",
+     "\n",
+     "## Upgrade ValidMind\n",
+     "\n",
+     "<div class=\"alert alert-block alert-info\" style=\"background-color: #B5B5B510; color: black; border: 1px solid #083E44; border-left-width: 5px; box-shadow: 2px 2px 4px rgba(0, 0, 0, 0.2);border-radius: 5px;\">
After installing ValidMind, you'll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "copyright-25e9888fae40481b9007f6a2e2bdd60b", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/site/notebooks/how_to/configure_dataset_features.ipynb b/site/notebooks/how_to/configure_dataset_features.ipynb index 13c9e0896b..30d86a1ee0 100644 --- a/site/notebooks/how_to/configure_dataset_features.ipynb +++ b/site/notebooks/how_to/configure_dataset_features.ipynb @@ -432,14 +432,17 @@ }, { "cell_type": "markdown", + "id": "copyright-77bd4876ab7945d6a1f592e432d81ca5", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } @@ -461,8 +464,7 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.13" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/site/notebooks/how_to/customize_test_result_descriptions.ipynb b/site/notebooks/how_to/customize_test_result_descriptions.ipynb index b81f97488c..c3aca95e16 100644 --- a/site/notebooks/how_to/customize_test_result_descriptions.ipynb +++ b/site/notebooks/how_to/customize_test_result_descriptions.ipynb @@ -1002,14 +1002,17 @@ }, { "cell_type": "markdown", + "id": "copyright-5559bb769ab34ad5b09176ce718a7b6c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb b/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb index 42bc0a8055..74e04e1ab9 100644 --- a/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb +++ b/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb @@ -583,14 +583,17 @@ }, { "cell_type": "markdown", + "id": "copyright-6ce412276b6244aab16b2e3443c6a861", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/enable_pii_detection.ipynb b/site/notebooks/how_to/enable_pii_detection.ipynb index a1dc943c0d..3d648e82cf 100644 --- a/site/notebooks/how_to/enable_pii_detection.ipynb +++ b/site/notebooks/how_to/enable_pii_detection.ipynb @@ -600,15 +600,17 @@ }, { "cell_type": "markdown", - "id": "f1e696a9", + "id": "copyright-b91f78083ca449d7a755ccfb02d001b1", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/explore_test_suites.ipynb b/site/notebooks/how_to/explore_test_suites.ipynb index 3e75eff10c..f9f0a72861 100644 --- a/site/notebooks/how_to/explore_test_suites.ipynb +++ b/site/notebooks/how_to/explore_test_suites.ipynb @@ -688,14 +688,17 @@ }, { "cell_type": "markdown", + "id": "copyright-a1609733aeef47c78c2de0ece28f59fa", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/explore_tests.ipynb b/site/notebooks/how_to/explore_tests.ipynb index 27119aacd1..9fc1d3292e 100644 --- a/site/notebooks/how_to/explore_tests.ipynb +++ b/site/notebooks/how_to/explore_tests.ipynb @@ -1,4423 +1,4426 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Explore tests\n", - "\n", - "Explore the individual out-the-box tests available in the ValidMind Library, and identify which tests to run to evaluate different aspects of your model. Browse available tests, view their descriptions, and filter by tags or task type to find tests relevant to your use case.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::: {.content-hidden when-format=\"html\"}\n", - "## Contents \n", - "- [About ValidMind](#toc1_) \n", - " - [Before you begin](#toc1_1_) \n", - " - [New to ValidMind?](#toc1_2_) \n", - " - [Key concepts](#toc1_3_) \n", - "- [Install the ValidMind Library](#toc2_) \n", - "- [List all available tests](#toc3_) \n", - "- [Understand tags and task types](#toc4_) \n", - "- [Filter tests by tags and task types](#toc5_)\n", - "- [Store test sets for use](#toc6_) \n", - "- [Next steps](#toc7_) \n", - " - [Discover more learning resources](#toc7_1_) \n", - "\n", - ":::\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## About ValidMind\n", - "\n", - "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", - "\n", - "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", - "\n", - "\n", - "\n", - "### Before you begin\n", - "\n", - "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", - "\n", - "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", - "\n", - "\n", - "\n", - "\n", - "### New to ValidMind?\n", - "\n", - "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", - "\n", - "
For access to all features available in this notebook, you'll need access to a ValidMind account.\n", - "

\n", - "Register with ValidMind
\n", - "\n", - "\n", - "\n", - "### Key concepts\n", - "\n", - "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", - "\n", - "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", - "\n", - "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", - "\n", - "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", - "\n", - "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. They can be any of the following:\n", - "\n", - " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", - " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", - " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", - " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", - "\n", - "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", - "\n", - "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", - "\n", - "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", - "\n", - "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Install the ValidMind Library\n", - "\n", - "
Recommended Python versions\n", - "

\n", - "Python 3.8 <= x <= 3.11
\n", - "\n", - "To install the library:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%pip install -q validmind" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## List all available tests\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "Start by importing the functions from the [validmind.tests](https://docs.validmind.ai/validmind/validmind/tests.html) module for listing tests, listing tasks, listing tags, and listing tasks and tags to access these functions in the rest of this notebook:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from validmind.tests import (\n", - " list_tests,\n", - " list_tasks,\n", - " list_tags,\n", - " list_tasks_and_tags,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to retrieve all available ValidMind tests, which returns a DataFrame with the following columns:\n", - "\n", - "- **ID** – A unique identifier for each test.\n", - "- **Name** – The test’s name.\n", - "- **Description** – A short summary of what the test evaluates.\n", - "- **Tags** – Keywords that describe what the test does or applies to.\n", - "- **Tasks** – The type of modeling task the test supports." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks |
|----|------|-------------|------------|-----------|-----------------|--------|------|-------|
| validmind.data_validation.ACFandPACFPlot | ACF and PACF Plot | Analyzes time series data using Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF) plots to... | True | False | ['dataset'] | {} | ['time_series_data', 'forecasting', 'statistical_test', 'visualization'] | ['regression'] |
| validmind.data_validation.ADF | ADF | Assesses the stationarity of a time series dataset using the Augmented Dickey-Fuller (ADF) test.... | False | True | ['dataset'] | {} | ['time_series_data', 'statsmodels', 'forecasting', 'statistical_test', 'stationarity'] | ['regression'] |
| validmind.data_validation.AutoAR | Auto AR | Automatically identifies the optimal Autoregressive (AR) order for a time series using BIC and AIC criteria.... | False | True | ['dataset'] | {'max_ar_order': {'type': 'int', 'default': 3}} | ['time_series_data', 'statsmodels', 'forecasting', 'statistical_test'] | ['regression'] |
| validmind.data_validation.AutoMA | Auto MA | Automatically selects the optimal Moving Average (MA) order for each variable in a time series dataset based on... | False | True | ['dataset'] | {'max_ma_order': {'type': 'int', 'default': 3}} | ['time_series_data', 'statsmodels', 'forecasting', 'statistical_test'] | ['regression'] |
| validmind.data_validation.AutoStationarity | Auto Stationarity | Automates Augmented Dickey-Fuller test to assess stationarity across multiple time series in a DataFrame.... | False | True | ['dataset'] | {'max_order': {'type': 'int', 'default': 5}, 'threshold': {'type': 'float', 'default': 0.05}} | ['time_series_data', 'statsmodels', 'forecasting', 'statistical_test'] | ['regression'] |
| validmind.data_validation.BivariateScatterPlots | Bivariate Scatter Plots | Generates bivariate scatterplots to visually inspect relationships between pairs of numerical predictor variables... | True | False | ['dataset'] | {} | ['tabular_data', 'numerical_data', 'visualization'] | ['classification'] |
| validmind.data_validation.BoxPierce | Box Pierce | Detects autocorrelation in time-series data through the Box-Pierce test to validate model performance.... | False | True | ['dataset'] | {} | ['time_series_data', 'forecasting', 'statistical_test', 'statsmodels'] | ['regression'] |
| validmind.data_validation.ChiSquaredFeaturesTable | Chi Squared Features Table | Assesses the statistical association between categorical features and a target variable using the Chi-Squared test.... | False | True | ['dataset'] | {'p_threshold': {'type': '_empty', 'default': 0.05}} | ['tabular_data', 'categorical_data', 'statistical_test'] | ['classification'] |
| validmind.data_validation.ClassImbalance | Class Imbalance | Evaluates and quantifies class distribution imbalance in a dataset used by a machine learning model.... | True | True | ['dataset'] | {'min_percent_threshold': {'type': 'int', 'default': 10}} | ['tabular_data', 'binary_classification', 'multiclass_classification', 'data_quality'] | ['classification'] |
| validmind.data_validation.DatasetDescription | Dataset Description | Provides comprehensive analysis and statistical summaries of each column in a machine learning model's dataset.... | False | True | ['dataset'] | {} | ['tabular_data', 'time_series_data', 'text_data'] | ['classification', 'regression', 'text_classification', 'text_summarization'] |
| validmind.data_validation.DatasetSplit | Dataset Split | Evaluates and visualizes the distribution proportions among training, testing, and validation datasets of an ML... | False | True | ['datasets'] | {} | ['tabular_data', 'time_series_data', 'text_data'] | ['classification', 'regression', 'text_classification', 'text_summarization'] |
| validmind.data_validation.DescriptiveStatistics | Descriptive Statistics | Performs a detailed descriptive statistical analysis of both numerical and categorical data within a model's... | False | True | ['dataset'] | {} | ['tabular_data', 'time_series_data', 'data_quality'] | ['classification', 'regression'] |
| validmind.data_validation.DickeyFullerGLS | Dickey Fuller GLS | Assesses stationarity in time series data using the Dickey-Fuller GLS test to determine the order of integration.... | False | True | ['dataset'] | {} | ['time_series_data', 'forecasting', 'unit_root_test'] | ['regression'] |
| validmind.data_validation.Duplicates | Duplicates | Tests dataset for duplicate entries, ensuring model reliability via data quality verification.... | False | True | ['dataset'] | {'min_threshold': {'type': '_empty', 'default': 1}} | ['tabular_data', 'data_quality', 'text_data'] | ['classification', 'regression'] |
| validmind.data_validation.EngleGrangerCoint | Engle Granger Coint | Assesses the degree of co-movement between pairs of time series data using the Engle-Granger cointegration test.... | False | True | ['dataset'] | {'threshold': {'type': 'float', 'default': 0.05}} | ['time_series_data', 'statistical_test', 'forecasting'] | ['regression'] |
| validmind.data_validation.FeatureTargetCorrelationPlot | Feature Target Correlation Plot | Visualizes the correlation between input features and the model's target output in a color-coded horizontal bar... | True | False | ['dataset'] | {'fig_height': {'type': '_empty', 'default': 600}} | ['tabular_data', 'visualization', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.HighCardinality | High Cardinality | Assesses the number of unique values in categorical columns to detect high cardinality and potential overfitting.... | False | True | ['dataset'] | {'num_threshold': {'type': 'int', 'default': 100}, 'percent_threshold': {'type': 'float', 'default': 0.1}, 'threshold_type': {'type': 'str', 'default': 'percent'}} | ['tabular_data', 'data_quality', 'categorical_data'] | ['classification', 'regression'] |
| validmind.data_validation.HighPearsonCorrelation | High Pearson Correlation | Identifies highly correlated feature pairs in a dataset suggesting feature redundancy or multicollinearity.... | False | True | ['dataset'] | {'max_threshold': {'type': 'float', 'default': 0.3}, 'top_n_correlations': {'type': 'int', 'default': 10}, 'feature_columns': {'type': 'list', 'default': None}} | ['tabular_data', 'data_quality', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.IQROutliersBarPlot | IQR Outliers Bar Plot | Visualizes outlier distribution across percentiles in numerical data using the Interquartile Range (IQR) method.... | True | False | ['dataset'] | {'threshold': {'type': 'float', 'default': 1.5}, 'fig_width': {'type': 'int', 'default': 800}} | ['tabular_data', 'visualization', 'numerical_data'] | ['classification', 'regression'] |
| validmind.data_validation.IQROutliersTable | IQR Outliers Table | Determines and summarizes outliers in numerical features using the Interquartile Range method.... | False | True | ['dataset'] | {'threshold': {'type': 'float', 'default': 1.5}} | ['tabular_data', 'numerical_data'] | ['classification', 'regression'] |
| validmind.data_validation.IsolationForestOutliers | Isolation Forest Outliers | Detects outliers in a dataset using the Isolation Forest algorithm and visualizes results through scatter plots.... | True | False | ['dataset'] | {'random_state': {'type': 'int', 'default': 0}, 'contamination': {'type': 'float', 'default': 0.1}, 'feature_columns': {'type': 'list', 'default': None}} | ['tabular_data', 'anomaly_detection'] | ['classification'] |
| validmind.data_validation.JarqueBera | Jarque Bera | Assesses normality of dataset features in an ML model using the Jarque-Bera test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.data_validation.KPSS | KPSS | Assesses the stationarity of time-series data in a machine learning model using the KPSS unit root test.... | False | True | ['dataset'] | {} | ['time_series_data', 'stationarity', 'unit_root_test', 'statsmodels'] | ['data_validation'] |
| validmind.data_validation.LJungBox | Ljung Box | Assesses autocorrelations in dataset features by performing a Ljung-Box test on each feature.... | False | True | ['dataset'] | {} | ['time_series_data', 'forecasting', 'statistical_test', 'statsmodels'] | ['regression'] |
| validmind.data_validation.LaggedCorrelationHeatmap | Lagged Correlation Heatmap | Assesses and visualizes correlation between target variable and lagged independent variables in a time-series... | True | False | ['dataset'] | {'num_lags': {'type': 'int', 'default': 10}} | ['time_series_data', 'visualization'] | ['regression'] |
| validmind.data_validation.MissingValues | Missing Values | Evaluates dataset quality by ensuring missing value ratio across all features does not exceed a set threshold.... | False | True | ['dataset'] | {'min_threshold': {'type': 'int', 'default': 1}} | ['tabular_data', 'data_quality'] | ['classification', 'regression'] |
| validmind.data_validation.MissingValuesBarPlot | Missing Values Bar Plot | Assesses the percentage and distribution of missing values in the dataset via a bar plot, with emphasis on... | True | False | ['dataset'] | {'threshold': {'type': 'int', 'default': 80}, 'fig_height': {'type': 'int', 'default': 600}} | ['tabular_data', 'data_quality', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.MutualInformation | Mutual Information | Calculates mutual information scores between features and target variable to evaluate feature relevance.... | True | False | ['dataset'] | {'min_threshold': {'type': 'float', 'default': 0.01}, 'task': {'type': 'str', 'default': 'classification'}} | ['feature_selection', 'data_analysis'] | ['classification', 'regression'] |
| validmind.data_validation.PearsonCorrelationMatrix | Pearson Correlation Matrix | Evaluates linear dependency between numerical variables in a dataset via a Pearson Correlation coefficient heat map.... | True | False | ['dataset'] | {} | ['tabular_data', 'numerical_data', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.PhillipsPerronArch | Phillips Perron Arch | Assesses the stationarity of time series data in each feature of the ML model using the Phillips-Perron test.... | False | True | ['dataset'] | {} | ['time_series_data', 'forecasting', 'statistical_test', 'unit_root_test'] | ['regression'] |
| validmind.data_validation.ProtectedClassesDescription | Protected Classes Description | Visualizes the distribution of protected classes in the dataset relative to the target variable... | True | True | ['dataset'] | {'protected_classes': {'type': '_empty', 'default': None}} | ['bias_and_fairness', 'descriptive_statistics'] | ['classification', 'regression'] |
| validmind.data_validation.RollingStatsPlot | Rolling Stats Plot | Evaluates the stationarity of time series data by plotting its rolling mean and standard deviation over a specified... | True | False | ['dataset'] | {'window_size': {'type': 'int', 'default': 12}} | ['time_series_data', 'visualization', 'stationarity'] | ['regression'] |
| validmind.data_validation.RunsTest | Runs Test | Executes Runs Test on ML model to detect non-random patterns in output data sequence.... | False | True | ['dataset'] | {} | ['tabular_data', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.data_validation.ScatterPlot | Scatter Plot | Assesses visual relationships, patterns, and outliers among features in a dataset through scatter plot matrices.... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.ScoreBandDefaultRates | Score Band Default Rates | Analyzes default rates and population distribution across credit score bands.... | False | True | ['dataset', 'model'] | {'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}} | ['visualization', 'credit_risk', 'scorecard'] | ['classification'] |
| validmind.data_validation.SeasonalDecompose | Seasonal Decompose | Assesses patterns and seasonality in a time series dataset by decomposing its features into foundational components.... | True | False | ['dataset'] | {'seasonal_model': {'type': 'str', 'default': 'additive'}} | ['time_series_data', 'seasonality', 'statsmodels'] | ['regression'] |
| validmind.data_validation.ShapiroWilk | Shapiro Wilk | Evaluates feature-wise normality of training data using the Shapiro-Wilk test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test'] | ['classification', 'regression'] |
| validmind.data_validation.Skewness | Skewness | Evaluates the skewness of numerical data in a dataset to check against a defined threshold, aiming to ensure data... | False | True | ['dataset'] | {'max_threshold': {'type': '_empty', 'default': 1}} | ['data_quality', 'tabular_data'] | ['classification', 'regression'] |
| validmind.data_validation.SpreadPlot | Spread Plot | Assesses potential correlations between pairs of time series variables through visualization to enhance... | True | False | ['dataset'] | {} | ['time_series_data', 'visualization'] | ['regression'] |
| validmind.data_validation.TabularCategoricalBarPlots | Tabular Categorical Bar Plots | Generates and visualizes bar plots for each category in categorical features to evaluate the dataset's composition.... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TabularDateTimeHistograms | Tabular Date Time Histograms | Generates histograms to provide graphical insight into the distribution of time intervals in a model's datetime... | True | False | ['dataset'] | {} | ['time_series_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TabularDescriptionTables | Tabular Description Tables | Summarizes key descriptive statistics for numerical, categorical, and datetime variables in a dataset.... | False | True | ['dataset'] | {} | ['tabular_data'] | ['classification', 'regression'] |
| validmind.data_validation.TabularNumericalHistograms | Tabular Numerical Histograms | Generates histograms for each numerical feature in a dataset to provide visual insights into data distribution and... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TargetRateBarPlots | Target Rate Bar Plots | Generates bar plots visualizing the default rates of categorical features for a classification machine learning... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization', 'categorical_data'] | ['classification'] |
| validmind.data_validation.TimeSeriesDescription | Time Series Description | Generates a detailed analysis for the provided time series dataset, summarizing key statistics to identify trends,... | False | True | ['dataset'] | {} | ['time_series_data', 'analysis'] | ['regression'] |
| validmind.data_validation.TimeSeriesDescriptiveStatistics | Time Series Descriptive Statistics | Evaluates the descriptive statistics of a time series dataset to identify trends, patterns, and data quality issues.... | False | True | ['dataset'] | {} | ['time_series_data', 'analysis'] | ['regression'] |
| validmind.data_validation.TimeSeriesFrequency | Time Series Frequency | Evaluates consistency of time series data frequency and generates a frequency plot.... | True | True | ['dataset'] | {} | ['time_series_data'] | ['regression'] |
| validmind.data_validation.TimeSeriesHistogram | Time Series Histogram | Visualizes distribution of time-series data using histograms and Kernel Density Estimation (KDE) lines.... | True | False | ['dataset'] | {'nbins': {'type': '_empty', 'default': 30}} | ['data_validation', 'visualization', 'time_series_data'] | ['regression', 'time_series_forecasting'] |
| validmind.data_validation.TimeSeriesLinePlot | Time Series Line Plot | Generates and analyses time-series data through line plots revealing trends, patterns, anomalies over time.... | True | False | ['dataset'] | {} | ['time_series_data', 'visualization'] | ['regression'] |
| validmind.data_validation.TimeSeriesMissingValues | Time Series Missing Values | Validates time-series data quality by confirming the count of missing values is below a certain threshold.... | True | True | ['dataset'] | {'min_threshold': {'type': 'int', 'default': 1}} | ['time_series_data'] | ['regression'] |
| validmind.data_validation.TimeSeriesOutliers | Time Series Outliers | Identifies and visualizes outliers in time-series data using the z-score method.... | False | True | ['dataset'] | {'zscore_threshold': {'type': 'int', 'default': 3}} | ['time_series_data'] | ['regression'] |
| validmind.data_validation.TooManyZeroValues | Too Many Zero Values | Identifies numerical columns in a dataset that contain an excessive number of zero values, defined by a threshold... | False | True | ['dataset'] | {'max_percent_threshold': {'type': 'float', 'default': 0.03}} | ['tabular_data'] | ['regression', 'classification'] |
| validmind.data_validation.UniqueRows | Unique Rows | Verifies the diversity of the dataset by ensuring that the count of unique rows exceeds a prescribed threshold.... | False | True | ['dataset'] | {'min_percent_threshold': {'type': 'float', 'default': 1}} | ['tabular_data'] | ['regression', 'classification'] |
| validmind.data_validation.WOEBinPlots | WOE Bin Plots | Generates visualizations of Weight of Evidence (WoE) and Information Value (IV) for understanding predictive power... | True | False | ['dataset'] | {'breaks_adj': {'type': 'list', 'default': None}, 'fig_height': {'type': 'int', 'default': 600}, 'fig_width': {'type': 'int', 'default': 500}} | ['tabular_data', 'visualization', 'categorical_data'] | ['classification'] |
| validmind.data_validation.WOEBinTable | WOE Bin Table | Assesses the Weight of Evidence (WoE) and Information Value (IV) of each feature to evaluate its predictive power... | False | True | ['dataset'] | {'breaks_adj': {'type': 'list', 'default': None}} | ['tabular_data', 'categorical_data'] | ['classification'] |
| validmind.data_validation.ZivotAndrewsArch | Zivot Andrews Arch | Evaluates the order of integration and stationarity of time series data using the Zivot-Andrews unit root test.... | False | True | ['dataset'] | {} | ['time_series_data', 'stationarity', 'unit_root_test'] | ['regression'] |
| validmind.data_validation.nlp.CommonWords | Common Words | Assesses the most frequent non-stopwords in a text column for identifying prevalent language patterns.... | True | False | ['dataset'] | {} | ['nlp', 'text_data', 'visualization', 'frequency_analysis'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.Hashtags | Hashtags | Assesses hashtag frequency in a text column, highlighting usage trends and potential dataset bias or spam.... | True | False | ['dataset'] | {'top_hashtags': {'type': 'int', 'default': 25}} | ['nlp', 'text_data', 'visualization', 'frequency_analysis'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.LanguageDetection | Language Detection | Assesses the diversity of languages in a textual dataset by detecting and visualizing the distribution of languages.... | True | False | ['dataset'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.Mentions | Mentions | Calculates and visualizes frequencies of '@' prefixed mentions in a text-based dataset for NLP model analysis.... | True | False | ['dataset'] | {'top_mentions': {'type': 'int', 'default': 25}} | ['nlp', 'text_data', 'visualization', 'frequency_analysis'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.PolarityAndSubjectivity | Polarity And Subjectivity | Analyzes the polarity and subjectivity of text data within a given dataset to visualize the sentiment distribution.... | True | True | ['dataset'] | {'threshold_subjectivity': {'type': '_empty', 'default': 0.5}, 'threshold_polarity': {'type': '_empty', 'default': 0}} | ['nlp', 'text_data', 'data_validation'] | ['nlp'] |
| validmind.data_validation.nlp.Punctuations | Punctuations | Analyzes and visualizes the frequency distribution of punctuation usage in a given text dataset.... | True | False | ['dataset'] | {'count_mode': {'type': '_empty', 'default': 'token'}} | ['nlp', 'text_data', 'visualization', 'frequency_analysis'] | ['text_classification', 'text_summarization', 'nlp'] |
| validmind.data_validation.nlp.Sentiment | Sentiment | Analyzes the sentiment of text data within a dataset using the VADER sentiment analysis tool.... | True | False | ['dataset'] | {} | ['nlp', 'text_data', 'data_validation'] | ['nlp'] |
| validmind.data_validation.nlp.StopWords | Stop Words | Evaluates and visualizes the frequency of English stop words in a text dataset against a defined threshold.... | True | True | ['dataset'] | {'min_percent_threshold': {'type': 'float', 'default': 0.5}, 'num_words': {'type': 'int', 'default': 25}} | ['nlp', 'text_data', 'frequency_analysis', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.TextDescription | Text Description | Conducts comprehensive textual analysis on a dataset using NLTK to evaluate various parameters and generate... | True | False | ['dataset'] | {'unwanted_tokens': {'type': 'set', 'default': {'s', 'mrs', 'us', "''", ' ', 'ms', 'dr', 'dollar', '``', 'mr', "'s", "s'"}}, 'lang': {'type': 'str', 'default': 'english'}} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.data_validation.nlp.Toxicity | Toxicity | Assesses the toxicity of text data within a dataset to visualize the distribution of toxicity scores.... | True | False | ['dataset'] | {} | ['nlp', 'text_data', 'data_validation'] | ['nlp'] |
| validmind.model_validation.BertScore | Bert Score | Assesses the quality of machine-generated text using BERTScore metrics and visualizes results through histograms... | True | True | ['dataset', 'model'] | {'evaluation_model': {'type': '_empty', 'default': 'distilbert-base-uncased'}} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.BleuScore | Bleu Score | Evaluates the quality of machine-generated text using BLEU metrics and visualizes the results through histograms... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.ClusterSizeDistribution | Cluster Size Distribution | Assesses the performance of clustering models by comparing the distribution of cluster sizes in model predictions... | True | False | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.ContextualRecall | Contextual Recall | Evaluates a Natural Language Generation model's ability to generate contextually relevant and factually correct... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.FeaturesAUC | Features AUC | Evaluates the discriminatory power of each individual feature within a binary classification model by calculating... | True | False | ['dataset'] | {'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}} | ['feature_importance', 'AUC', 'visualization'] | ['classification'] |
| validmind.model_validation.MeteorScore | Meteor Score | Assesses the quality of machine-generated translations by comparing them to human-produced references using the... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.ModelMetadata | Model Metadata | Compare metadata of different models and generate a summary table with the results.... | False | True | ['model'] | {} | ['model_training', 'metadata'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.ModelPredictionResiduals | Model Prediction Residuals | Assesses normality and behavior of residuals in regression models through visualization and statistical tests.... | True | True | ['dataset', 'model'] | {'nbins': {'type': 'int', 'default': 100}, 'p_value_threshold': {'type': 'float', 'default': 0.05}, 'start_date': {'type': None, 'default': None}, 'end_date': {'type': None, 'default': None}} | ['regression'] | ['residual_analysis', 'visualization'] |
| validmind.model_validation.RegardScore | Regard Score | Assesses the sentiment and potential biases in text generated by NLP models by computing and visualizing regard... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.RegressionResidualsPlot | Regression Residuals Plot | Evaluates regression model performance using residual distribution and actual vs. predicted plots.... | True | False | ['model', 'dataset'] | {'bin_size': {'type': 'float', 'default': 0.1}} | ['model_performance', 'visualization'] | ['regression'] |
| validmind.model_validation.RougeScore | Rouge Score | Assesses the quality of machine-generated text using ROUGE metrics and visualizes the results to provide... | True | True | ['dataset', 'model'] | {'metric': {'type': 'str', 'default': 'rouge-1'}} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.TimeSeriesPredictionWithCI | Time Series Prediction With CI | Assesses predictive accuracy and uncertainty in time series models, highlighting breaches beyond confidence... | True | True | ['dataset', 'model'] | {'confidence': {'type': 'float', 'default': 0.95}} | ['model_predictions', 'visualization'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.TimeSeriesPredictionsPlot | Time Series Predictions Plot | Plot actual vs predicted values for time series data and generate a visual comparison for the model.... | True | False | ['dataset', 'model'] | {} | ['model_predictions', 'visualization'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.TimeSeriesR2SquareBySegments | Time Series R2 Square By Segments | Evaluates the R-Squared values of regression models over specified time segments in time series data to assess... | True | True | ['dataset', 'model'] | {'segments': {'type': None, 'default': None}} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.TokenDisparity | Token Disparity | Evaluates the token disparity between reference and generated texts, visualizing the results through histograms and... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.ToxicityScore | Toxicity Score | Assesses the toxicity levels of texts generated by NLP models to identify and mitigate harmful or offensive content.... | True | True | ['dataset', 'model'] | {} | ['nlp', 'text_data', 'visualization'] | ['text_classification', 'text_summarization'] |
| validmind.model_validation.embeddings.ClusterDistribution | Cluster Distribution | Assesses the distribution of text embeddings across clusters produced by a model using KMeans clustering.... | True | False | ['model', 'dataset'] | {'num_clusters': {'type': 'int', 'default': 5}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.CosineSimilarityComparison | Cosine Similarity Comparison | Assesses the similarity between embeddings generated by different models using Cosine Similarity, providing both... | True | True | ['dataset', 'models'] | {} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.embeddings.CosineSimilarityDistribution | Cosine Similarity Distribution | Assesses the similarity between predicted text embeddings from a model using a Cosine Similarity distribution... | True | False | ['dataset', 'model'] | {} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.CosineSimilarityHeatmap | Cosine Similarity Heatmap | Generates an interactive heatmap to visualize the cosine similarities among embeddings derived from a given model.... | True | False | ['dataset', 'model'] | {'title': {'type': '_empty', 'default': 'Cosine Similarity Matrix'}, 'color': {'type': '_empty', 'default': 'Cosine Similarity'}, 'xaxis_title': {'type': '_empty', 'default': 'Index'}, 'yaxis_title': {'type': '_empty', 'default': 'Index'}, 'color_scale': {'type': '_empty', 'default': 'Blues'}} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.embeddings.DescriptiveAnalytics | Descriptive Analytics | Evaluates statistical properties of text embeddings in an ML model via mean, median, and standard deviation... | True | False | ['dataset', 'model'] | {} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.EmbeddingsVisualization2D | Embeddings Visualization 2D | Visualizes 2D representation of text embeddings generated by a model using t-SNE technique.... | True | False | ['dataset', 'model'] | {'cluster_column': {'type': None, 'default': None}, 'perplexity': {'type': 'int', 'default': 30}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.EuclideanDistanceComparison | Euclidean Distance Comparison | Assesses and visualizes the dissimilarity between model embeddings using Euclidean distance, providing insights... | True | True | ['dataset', 'models'] | {} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.embeddings.EuclideanDistanceHeatmap | Euclidean Distance Heatmap | Generates an interactive heatmap to visualize the Euclidean distances among embeddings derived from a given model.... | True | False | ['dataset', 'model'] | {'title': {'type': '_empty', 'default': 'Euclidean Distance Matrix'}, 'color': {'type': '_empty', 'default': 'Euclidean Distance'}, 'xaxis_title': {'type': '_empty', 'default': 'Index'}, 'yaxis_title': {'type': '_empty', 'default': 'Index'}, 'color_scale': {'type': '_empty', 'default': 'Blues'}} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.embeddings.PCAComponentsPairwisePlots | PCA Components Pairwise Plots | Generates scatter plots for pairwise combinations of principal component analysis (PCA) components of model... | True | False | ['dataset', 'model'] | {'n_components': {'type': 'int', 'default': 3}} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.embeddings.StabilityAnalysisKeyword | Stability Analysis Keyword | Evaluates robustness of embedding models to keyword swaps in the test dataset.... | True | True | ['dataset', 'model'] | {'keyword_dict': {'type': None, 'default': None}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.StabilityAnalysisRandomNoise | Stability Analysis Random Noise | Assesses the robustness of text embeddings models to random noise introduced via text perturbations.... | True | True | ['dataset', 'model'] | {'probability': {'type': 'float', 'default': 0.02}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.StabilityAnalysisSynonyms | Stability Analysis Synonyms | Evaluates the stability of text embeddings models when words in test data are replaced by their synonyms randomly.... | True | True | ['dataset', 'model'] | {'probability': {'type': 'float', 'default': 0.02}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.StabilityAnalysisTranslation | Stability Analysis Translation | Evaluates robustness of text embeddings models to noise introduced by translating the original text to another... | True | True | ['dataset', 'model'] | {'source_lang': {'type': 'str', 'default': 'en'}, 'target_lang': {'type': 'str', 'default': 'fr'}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}} | ['llm', 'text_data', 'embeddings', 'visualization'] | ['feature_extraction'] |
| validmind.model_validation.embeddings.TSNEComponentsPairwisePlots | TSNE Components Pairwise Plots | Creates scatter plots for pairwise combinations of t-SNE components to visualize embeddings and highlight potential... | True | False | ['dataset', 'model'] | {'n_components': {'type': 'int', 'default': 2}, 'perplexity': {'type': 'int', 'default': 30}, 'title': {'type': 'str', 'default': 't-SNE'}} | ['visualization', 'dimensionality_reduction', 'embeddings'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.AnswerCorrectness | Answer Correctness | Evaluates the correctness of answers in a dataset with respect to the provided ground... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.AspectCritic | Aspect Critic | Evaluates generations against the following aspects: harmfulness, maliciousness,... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': None, 'default': None}, 'aspects': {'type': None, 'default': ['coherence', 'conciseness', 'correctness', 'harmfulness', 'maliciousness']}, 'additional_aspects': {'type': None, 'default': None}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'qualitative'] | ['text_summarization', 'text_generation', 'text_qa'] |
| validmind.model_validation.ragas.ContextEntityRecall | Context Entity Recall | Evaluates the context entity recall for dataset entries and visualizes the results.... | True | True | ['dataset'] | {'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'retrieval_performance'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.ContextPrecision | Context Precision | Context Precision is a metric that evaluates whether all of the ground-truth... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'retrieval_performance'] | ['text_qa', 'text_generation', 'text_summarization', 'text_classification'] |
| validmind.model_validation.ragas.ContextPrecisionWithoutReference | Context Precision Without Reference | Context Precision Without Reference is a metric used to evaluate the relevance of... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'response_column': {'type': 'str', 'default': 'response'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'retrieval_performance'] | ['text_qa', 'text_generation', 'text_summarization', 'text_classification'] |
| validmind.model_validation.ragas.ContextRecall | Context Recall | Context recall measures the extent to which the retrieved context aligns with the... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'retrieval_performance'] | ['text_qa', 'text_generation', 'text_summarization', 'text_classification'] |
| validmind.model_validation.ragas.Faithfulness | Faithfulness | Evaluates the faithfulness of the generated answers with respect to retrieved contexts.... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'rag_performance'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.NoiseSensitivity | Noise Sensitivity | Assesses the sensitivity of a Large Language Model (LLM) to noise in retrieved context by measuring how often it... | True | True | ['dataset'] | {'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'focus': {'type': 'str', 'default': 'relevant'}, 'user_input_column': {'type': 'str', 'default': 'user_input'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'rag_performance'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.ResponseRelevancy | Response Relevancy | Assesses how pertinent the generated answer is to the given prompt.... | True | True | ['dataset'] | {'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': None}, 'response_column': {'type': 'str', 'default': 'response'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm', 'rag_performance'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.ragas.SemanticSimilarity | Semantic Similarity | Calculates the semantic similarity between generated responses and ground truths... | True | True | ['dataset'] | {'response_column': {'type': 'str', 'default': 'response'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}} | ['ragas', 'llm'] | ['text_qa', 'text_generation', 'text_summarization'] |
| validmind.model_validation.sklearn.AdjustedMutualInformation | Adjusted Mutual Information | Evaluates clustering model performance by measuring mutual information between true and predicted labels, adjusting... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.AdjustedRandIndex | Adjusted Rand Index | Measures the similarity between two data clusters using the Adjusted Rand Index (ARI) metric in clustering machine... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.CalibrationCurve | Calibration Curve | Evaluates the calibration of probability estimates by comparing predicted probabilities against observed... | True | False | ['model', 'dataset'] | {'n_bins': {'type': 'int', 'default': 10}} | ['sklearn', 'model_performance', 'classification'] | ['classification'] |
| validmind.model_validation.sklearn.ClassifierPerformance | Classifier Performance | Evaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,... | False | True | ['dataset', 'model'] | {'average': {'type': 'str', 'default': 'macro'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ClassifierThresholdOptimization | Classifier Threshold Optimization | Analyzes and visualizes different threshold optimization methods for binary classification models.... | False | True | ['dataset', 'model'] | {'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}} | ['model_validation', 'threshold_optimization', 'classification_metrics'] | ['classification'] |
| validmind.model_validation.sklearn.ClusterCosineSimilarity | Cluster Cosine Similarity | Measures the intra-cluster similarity of a clustering model using cosine similarity.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.ClusterPerformanceMetrics | Cluster Performance Metrics | Evaluates the performance of clustering machine learning models using multiple established metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.CompletenessScore | Completeness Score | Evaluates a clustering model's capacity to categorize instances from a single class into the same cluster.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.FeatureImportance | Feature Importance | Compute feature importance scores for a given model and generate a summary table... | False | True | ['dataset', 'model'] | {'num_features': {'type': 'int', 'default': 3}} | ['model_explainability', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.FowlkesMallowsScore | Fowlkes Mallows Score | Evaluates the similarity between predicted and actual cluster assignments in a model using the Fowlkes-Mallows... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.HomogeneityScore | Homogeneity Score | Assesses clustering homogeneity by comparing true and predicted labels, scoring from 0 (heterogeneous) to 1... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.HyperParametersTuning | Hyper Parameters Tuning | Performs exhaustive grid search over specified parameter ranges to find optimal model configurations... | False | True | ['model', 'dataset'] | {'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}} | ['sklearn', 'model_performance'] | ['clustering', 'classification'] |
| validmind.model_validation.sklearn.KMeansClustersOptimization | K Means Clusters Optimization | Optimizes the number of clusters in K-means models using Elbow and Silhouette methods.... | True | False | ['model', 'dataset'] | {'n_clusters': {'type': None, 'default': None}} | ['sklearn', 'model_performance', 'kmeans'] | ['clustering'] |
| validmind.model_validation.sklearn.MinimumAccuracy | Minimum Accuracy | Checks if the model's prediction accuracy meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.7}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumF1Score | Minimum F1 Score | Assesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumROCAUCScore | Minimum ROC AUC Score | Validates model by checking if the ROC AUC score meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ModelParameters | Model Parameters | Extracts and displays model parameters in a structured format for transparency and reproducibility.... | False | True | ['model'] | {'model_params': {'type': None, 'default': None}} | ['model_training', 'metadata'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.ModelsPerformanceComparison | Models Performance Comparison | Evaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,... | False | True | ['dataset', 'models'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.OverfitDiagnosis | Overfit Diagnosis | Assesses potential overfitting in a model's predictions, identifying regions where performance between training and... | True | True | ['model', 'datasets'] | {'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.PermutationFeatureImportance | Permutation Feature Importance | Assesses the significance of each feature in a model by evaluating the impact on model performance when feature... | True | False | ['model', 'dataset'] | {'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PopulationStabilityIndex | Population Stability Index | Assesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across... | True | True | ['datasets', 'model'] | {'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.RegressionErrors | Regression Errors | Assesses the performance and error distribution of a regression model using various error metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression', 'classification'] |
| validmind.model_validation.sklearn.RegressionErrorsComparison | Regression Errors Comparison | Assesses multiple regression error metrics to compare model performance across different datasets, emphasizing... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.RegressionPerformance | Regression Performance | Evaluates the performance of a regression model using five different metrics: MAE, MSE, RMSE, MAPE, and MBD.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression'] |
| validmind.model_validation.sklearn.RegressionR2Square | Regression R2 Square | Assesses the overall goodness-of-fit of a regression model by evaluating R-squared (R2) and Adjusted R-squared (Adj... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['regression'] |
| validmind.model_validation.sklearn.RegressionR2SquareComparison | Regression R2 Square Comparison | Compares R-Squared and Adjusted R-Squared values for different regression models across multiple datasets to assess... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.RobustnessDiagnosis | Robustness Diagnosis | Assesses the robustness of a machine learning model by evaluating performance decay under noisy conditions.... | True | True | ['datasets', 'model'] | {'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}} | ['sklearn', 'model_diagnosis', 'visualization'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.SHAPGlobalImportance | SHAP Global Importance | Evaluates and visualizes global feature importance using SHAP values for model explanation and risk identification.... | False | True | ['model', 'dataset'] | {'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ScoreProbabilityAlignment | Score Probability Alignment | Analyzes the alignment between credit scores and predicted probabilities.... | True | True | ['model', 'dataset'] | {'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}} | ['visualization', 'credit_risk', 'calibration'] | ['classification'] |
| validmind.model_validation.sklearn.SilhouettePlot | Silhouette Plot | Calculates and visualizes Silhouette Score, assessing the degree of data point suitability to its cluster in ML... | True | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.VMeasure | V Measure | Evaluates homogeneity and completeness of a clustering model using the V Measure Score.... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.WeakspotsDiagnosis | Weakspots Diagnosis | Identifies and visualizes weak spots in a machine learning model's performance across various sections of the... | True | True | ['datasets', 'model'] | {'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.statsmodels.AutoARIMA | Auto ARIMA | Evaluates ARIMA models for time-series forecasting, ranking them using Bayesian and Akaike Information Criteria.... | False | True | ['model', 'dataset'] | {} | ['time_series_data', 'forecasting', 'model_selection', 'statsmodels'] | ['regression'] |
| validmind.model_validation.statsmodels.CumulativePredictionProbabilities | Cumulative Prediction Probabilities | Visualizes cumulative probabilities of positive and negative classes for both training and testing in classification models.... | True | False | ['dataset', 'model'] | {'title': {'type': 'str', 'default': 'Cumulative Probabilities'}} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.model_validation.statsmodels.DurbinWatsonTest | Durbin Watson Test | Assesses autocorrelation in time series data features using the Durbin-Watson statistic.... | False | True | ['dataset', 'model'] | {'threshold': {'type': None, 'default': [1.5, 2.5]}} | ['time_series_data', 'forecasting', 'statistical_test', 'statsmodels'] | ['regression'] |
| validmind.model_validation.statsmodels.GINITable | GINI Table | Evaluates classification model performance using AUC, GINI, and KS metrics for training and test datasets.... | False | True | ['dataset', 'model'] | {} | ['model_performance'] | ['classification'] |
| validmind.model_validation.statsmodels.KolmogorovSmirnov | Kolmogorov Smirnov | Assesses whether each feature in the dataset aligns with a normal distribution using the Kolmogorov-Smirnov test.... | False | True | ['model', 'dataset'] | {'dist': {'type': 'str', 'default': 'norm'}} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.model_validation.statsmodels.Lilliefors | Lilliefors | Assesses the normality of feature distributions in an ML model's training dataset using the Lilliefors test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.model_validation.statsmodels.PredictionProbabilitiesHistogram | Prediction Probabilities Histogram | Assesses the predictive probability distribution for binary classification to evaluate model performance and... | True | False | ['dataset', 'model'] | {'title': {'type': 'str', 'default': 'Histogram of Predictive Probabilities'}} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.model_validation.statsmodels.RegressionCoeffs | Regression Coeffs | Assesses the significance and uncertainty of predictor variables in a regression model through visualization of... | True | True | ['model'] | {} | ['tabular_data', 'visualization', 'model_training'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionFeatureSignificance | Regression Feature Significance | Assesses and visualizes the statistical significance of features in a regression model.... | True | False | ['model'] | {'fontsize': {'type': 'int', 'default': 10}, 'p_threshold': {'type': 'float', 'default': 0.05}} | ['statistical_test', 'model_interpretation', 'visualization', 'feature_importance'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionModelForecastPlot | Regression Model Forecast Plot | Generates plots to visually compare the forecasted outcomes of a regression model against actual observed values over... | True | False | ['model', 'dataset'] | {'start_date': {'type': None, 'default': None}, 'end_date': {'type': None, 'default': None}} | ['time_series_data', 'forecasting', 'visualization'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionModelForecastPlotLevels | Regression Model Forecast Plot Levels | Assesses the alignment between forecasted and observed values in regression models through visual plots... | True | False | ['model', 'dataset'] | {} | ['time_series_data', 'forecasting', 'visualization'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionModelSensitivityPlot | Regression Model Sensitivity Plot | Assesses the sensitivity of a regression model to changes in independent variables by applying shocks and... | True | False | ['dataset', 'model'] | {'shocks': {'type': None, 'default': [0.1]}, 'transformation': {'type': None, 'default': None}} | ['senstivity_analysis', 'visualization'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionModelSummary | Regression Model Summary | Evaluates regression model performance using metrics including R-Squared, Adjusted R-Squared, MSE, and RMSE.... | False | True | ['dataset', 'model'] | {} | ['model_performance', 'regression'] | ['regression'] |
| validmind.model_validation.statsmodels.RegressionPermutationFeatureImportance | Regression Permutation Feature Importance | Assesses the significance of each feature in a model by evaluating the impact on model performance when feature... | True | False | ['dataset', 'model'] | {'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}} | ['statsmodels', 'feature_importance', 'visualization'] | ['regression'] |
| validmind.model_validation.statsmodels.ScorecardHistogram | Scorecard Histogram | The Scorecard Histogram test evaluates the distribution of credit scores between default and non-default instances,... | True | False | ['dataset'] | {'title': {'type': 'str', 'default': 'Histogram of Scores'}, 'score_column': {'type': 'str', 'default': 'score'}} | ['visualization', 'credit_risk', 'logistic_regression'] | ['classification'] |
| validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassDiscriminationDrift | Class Discrimination Drift | Compares classification discrimination metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassImbalanceDrift | Class Imbalance Drift | Evaluates drift in class distribution between reference and monitoring datasets.... | True | True | ['datasets'] | {'drift_pct_threshold': {'type': 'float', 'default': 5.0}, 'title': {'type': 'str', 'default': 'Class Distribution Drift'}} | ['tabular_data', 'binary_classification', 'multiclass_classification'] | ['classification'] |
| validmind.ongoing_monitoring.ClassificationAccuracyDrift | Classification Accuracy Drift | Compares classification accuracy metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ConfusionMatrixDrift | Confusion Matrix Drift | Compares confusion matrix metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.CumulativePredictionProbabilitiesDrift | Cumulative Prediction Probabilities Drift | Compares cumulative prediction probability distributions between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.ongoing_monitoring.FeatureDrift | Feature Drift | Evaluates changes in feature distribution over time to identify potential model drift.... | True | True | ['datasets'] | {'bins': {'type': '_empty', 'default': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}, 'feature_columns': {'type': '_empty', 'default': None}, 'psi_threshold': {'type': '_empty', 'default': 0.2}} | ['visualization'] | ['monitoring'] |
| validmind.ongoing_monitoring.PredictionAcrossEachFeature | Prediction Across Each Feature | Assesses differences in model predictions across individual features between reference and monitoring datasets... | True | False | ['datasets', 'model'] | {} | ['visualization'] | ['monitoring'] |
validmind.ongoing_monitoring.PredictionCorrelationPrediction CorrelationAssesses correlation changes between model predictions from reference and monitoring datasets to detect potential...TrueTrue['datasets', 'model']{'drift_pct_threshold': {'type': 'float', 'default': 20}}['visualization']['monitoring']
validmind.ongoing_monitoring.PredictionProbabilitiesHistogramDriftPrediction Probabilities Histogram DriftCompares prediction probability distributions between reference and monitoring datasets....TrueTrue['datasets', 'model']{'title': {'type': '_empty', 'default': 'Prediction Probabilities Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk']['classification']
validmind.ongoing_monitoring.PredictionQuantilesAcrossFeaturesPrediction Quantiles Across FeaturesAssesses differences in model prediction distributions across individual features between reference...TrueFalse['datasets', 'model']{}['visualization']['monitoring']
validmind.ongoing_monitoring.ROCCurveDriftROC Curve DriftCompares ROC curves between reference and monitoring datasets....TrueFalse['datasets', 'model']{}['sklearn', 'binary_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.ongoing_monitoring.ScoreBandsDriftScore Bands DriftAnalyzes drift in population distribution and default rates across score bands....FalseTrue['datasets', 'model']{'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}, 'drift_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk', 'scorecard']['classification']
validmind.ongoing_monitoring.ScorecardHistogramDriftScorecard Histogram DriftCompares score distributions between reference and monitoring datasets for each class....TrueTrue['datasets']{'score_column': {'type': 'str', 'default': 'score'}, 'title': {'type': 'str', 'default': 'Scorecard Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk', 'logistic_regression']['classification']
validmind.ongoing_monitoring.TargetPredictionDistributionPlotTarget Prediction Distribution PlotAssesses differences in prediction distributions between a reference dataset and a monitoring dataset to identify...TrueTrue['datasets', 'model']{'drift_pct_threshold': {'type': 'float', 'default': 20}}['visualization']['monitoring']
validmind.prompt_validation.BiasBiasAssesses potential bias in a Large Language Model by analyzing the distribution and order of exemplars in the...FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.ClarityClarityEvaluates and scores the clarity of prompts in a Large Language Model based on specified guidelines....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.ConcisenessConcisenessAnalyzes and grades the conciseness of prompts provided to a Large Language Model....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.DelimitationDelimitationEvaluates the proper use of delimiters in prompts provided to Large Language Models....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.NegativeInstructionNegative InstructionEvaluates and grades the use of affirmative, proactive language over negative instructions in LLM prompts....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.RobustnessRobustnessAssesses the robustness of prompts provided to a Large Language Model under varying conditions and contexts. This test...FalseTrue['model', 'dataset']{'num_tests': {'type': '_empty', 'default': 10}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.SpecificitySpecificityEvaluates and scores the specificity of prompts provided to a Large Language Model (LLM), based on clarity, detail,...FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.unit_metrics.classification.AccuracyAccuracyCalculates the accuracy of a modelFalseFalse['dataset', 'model']{}['classification']['classification']
validmind.unit_metrics.classification.F1F1Calculates the F1 score for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.PrecisionPrecisionCalculates the precision for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.ROC_AUCROC AUCCalculates the ROC AUC for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.RecallRecallCalculates the recall for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.regression.AdjustedRSquaredScoreAdjusted R Squared ScoreCalculates the adjusted R-squared score for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.GiniCoefficientGini CoefficientCalculates the Gini coefficient for a regression model.FalseFalse['dataset', 'model']{}['regression']['regression']
validmind.unit_metrics.regression.HuberLossHuber LossCalculates the Huber loss for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.KolmogorovSmirnovStatisticKolmogorov Smirnov StatisticCalculates the Kolmogorov-Smirnov statistic for a regression model.FalseFalse['dataset', 'model']{}['regression']['regression']
validmind.unit_metrics.regression.MeanAbsoluteErrorMean Absolute ErrorCalculates the mean absolute error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanAbsolutePercentageErrorMean Absolute Percentage ErrorCalculates the mean absolute percentage error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanBiasDeviationMean Bias DeviationCalculates the mean bias deviation for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanSquaredErrorMean Squared ErrorCalculates the mean squared error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.QuantileLossQuantile LossCalculates the quantile loss for a regression model.FalseFalse['model', 'dataset']{'quantile': {'type': '_empty', 'default': 0.5}}['regression']['regression']
validmind.unit_metrics.regression.RSquaredScoreR Squared ScoreCalculates the R-squared score for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.RootMeanSquaredErrorRoot Mean Squared ErrorCalculates the root mean squared error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
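With a catalog this long, it is usually easier to filter than to scan. A minimal sketch of narrowing the output, assuming the `filter`, `task`, and `tags` parameters documented for [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests):

```python
from validmind.tests import list_tests

# Free-text match against test IDs, e.g. every ongoing-monitoring
# test from the catalog above.
list_tests(filter="ongoing_monitoring")

# Combine a task type with tags to target a use case, e.g.
# data-quality tests that apply to classification models.
list_tests(task="classification", tags=["tabular_data", "data_quality"])
```

Each call returns the same catalog view shown above, restricted to the matching rows.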
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "list_tests()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Understand tags and task types\n", - "\n", - "Use [list_tasks()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks) to view all unique task types used to classify tests in the ValidMind Library.\n", - "\n", - "Understanding `task` types helps you filter tests that match your model’s objective. For example:\n", - "\n", - "- **classification:** Works with Classification Models and Datasets.\n", - "- **regression:** Works with Regression Models and Datasets.\n", - "- **text classification:** Works with Text Classification Models and Datasets.\n", - "- **text summarization:** Works with Text Summarization Models and Datasets." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['text_qa',\n", - " 'classification',\n", - " 'data_validation',\n", - " 'text_classification',\n", - " 'feature_extraction',\n", - " 'regression',\n", - " 'visualization',\n", - " 'clustering',\n", - " 'time_series_forecasting',\n", - " 'text_summarization',\n", - " 'nlp',\n", - " 'residual_analysis',\n", - " 'monitoring',\n", - " 'text_generation']" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "list_tasks()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use [list_tags()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tags) to view all unique tags used to describe tests in the ValidMind Library.\n", - "\n", - "`Tags` describe what a test applies to and help you filter tests for your use case. Examples include:\n", - "\n", - "- **llm:** Tests that work with Large Language Models.\n", - "- **nlp:** Tests relevant for natural language processing.\n", - "- **binary_classification:** Tests for binary classification tasks.\n", - "- **forecasting:** Tests for forecasting and time-series analysis.\n", - "- **tabular_data:** Tests for tabular data like CSVs and Excel spreadsheets.\n", - "\n" - ] - }, + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Explore tests\n", + "\n", + "Explore the individual out-the-box tests available in the ValidMind Library, and identify which tests to run to evaluate different aspects of your model. 
Browse available tests, view their descriptions, and filter by tags or task type to find tests relevant to your use case.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [About ValidMind](#toc1_) \n", + " - [Before you begin](#toc1_1_) \n", + " - [New to ValidMind?](#toc1_2_) \n", + " - [Key concepts](#toc1_3_) \n", + "- [Install the ValidMind Library](#toc2_) \n", + "- [List all available tests](#toc3_) \n", + "- [Understand tags and task types](#toc4_) \n", + "- [Filter tests by tags and task types](#toc5_)\n", + "- [Store test sets for use](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Discover more learning resources](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models.\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators.\n", + "\n", + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html).\n", + "\n", + "\n", + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
To access all features available in this notebook, you'll need a ValidMind account.\n", "

\n", + "Register with ValidMind
\n", + "\n", + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Custom tests**: Custom tests are functions that you define to evaluate your model or dataset. These functions can be registered via the ValidMind Library to be used with the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom test.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom test. See this [example](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html) for more information.\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a test, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom tests can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Install the ValidMind Library\n", + "\n", + "
Recommended Python versions\n", + "

\n", + "Python 3.8 <= x <= 3.11
\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## List all available tests\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "Start by importing the functions from the [validmind.tests](https://docs.validmind.ai/validmind/validmind/tests.html) module for listing tests, listing tasks, listing tags, and listing tasks and tags to access these functions in the rest of this notebook:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.tests import (\n", + " list_tests,\n", + " list_tasks,\n", + " list_tags,\n", + " list_tasks_and_tags,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to retrieve all available ValidMind tests, which returns a DataFrame with the following columns:\n", + "\n", + "- **ID** – A unique identifier for each test.\n", + "- **Name** – The test’s name.\n", + "- **Description** – A short summary of what the test evaluates.\n", + "- **Tags** – Keywords that describe what the test does or applies to.\n", + "- **Tasks** – The type of modeling task the test supports." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['senstivity_analysis',\n", - " 'calibration',\n", - " 'clustering',\n", - " 'anomaly_detection',\n", - " 'nlp',\n", - " 'classification_metrics',\n", - " 'dimensionality_reduction',\n", - " 'tabular_data',\n", - " 'time_series_data',\n", - " 'model_predictions',\n", - " 'feature_selection',\n", - " 'correlation',\n", - " 'frequency_analysis',\n", - " 'embeddings',\n", - " 'regression',\n", - " 'llm',\n", - " 'statsmodels',\n", - " 'ragas',\n", - " 'model_performance',\n", - " 'model_validation',\n", - " 'rag_performance',\n", - " 'model_training',\n", - " 'qualitative',\n", - " 'classification',\n", - " 'kmeans',\n", - " 'multiclass_classification',\n", - " 'linear_regression',\n", - " 'data_quality',\n", - " 'text_data',\n", - " 'binary_classification',\n", - " 'threshold_optimization',\n", - " 'stationarity',\n", - " 'bias_and_fairness',\n", - " 'scorecard',\n", - " 'model_explainability',\n", - " 'model_comparison',\n", - " 'numerical_data',\n", - " 'sklearn',\n", - " 'model_selection',\n", - " 'retrieval_performance',\n", - " 'zero_shot',\n", - " 'statistical_test',\n", - " 'descriptive_statistics',\n", - " 'seasonality',\n", - " 'analysis',\n", - " 'data_validation',\n", - " 'data_distribution',\n", - " 'feature_importance',\n", - " 'metadata',\n", - " 'few_shot',\n", - " 'visualization',\n", - " 'credit_risk',\n", - " 'forecasting',\n", - " 'AUC',\n", - " 'logistic_regression',\n", - " 'model_diagnosis',\n", - " 'model_interpretation',\n", - " 'unit_root_test',\n", - " 'categorical_data',\n", - " 'data_analysis']" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", 
+ " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
IDNameDescriptionHas FigureHas TableRequired InputsParamsTagsTasks
validmind.data_validation.ACFandPACFPlotAC Fand PACF PlotAnalyzes time series data using Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF) plots to...TrueFalse['dataset']{}['time_series_data', 'forecasting', 'statistical_test', 'visualization']['regression']
validmind.data_validation.ADFADFAssesses the stationarity of a time series dataset using the Augmented Dickey-Fuller (ADF) test....FalseTrue['dataset']{}['time_series_data', 'statsmodels', 'forecasting', 'statistical_test', 'stationarity']['regression']
validmind.data_validation.AutoARAuto ARAutomatically identifies the optimal Autoregressive (AR) order for a time series using BIC and AIC criteria....FalseTrue['dataset']{'max_ar_order': {'type': 'int', 'default': 3}}['time_series_data', 'statsmodels', 'forecasting', 'statistical_test']['regression']
validmind.data_validation.AutoMAAuto MAAutomatically selects the optimal Moving Average (MA) order for each variable in a time series dataset based on...FalseTrue['dataset']{'max_ma_order': {'type': 'int', 'default': 3}}['time_series_data', 'statsmodels', 'forecasting', 'statistical_test']['regression']
validmind.data_validation.AutoStationarityAuto StationarityAutomates Augmented Dickey-Fuller test to assess stationarity across multiple time series in a DataFrame....FalseTrue['dataset']{'max_order': {'type': 'int', 'default': 5}, 'threshold': {'type': 'float', 'default': 0.05}}['time_series_data', 'statsmodels', 'forecasting', 'statistical_test']['regression']
validmind.data_validation.BivariateScatterPlotsBivariate Scatter PlotsGenerates bivariate scatterplots to visually inspect relationships between pairs of numerical predictor variables...TrueFalse['dataset']{}['tabular_data', 'numerical_data', 'visualization']['classification']
validmind.data_validation.BoxPierceBox PierceDetects autocorrelation in time-series data through the Box-Pierce test to validate model performance....FalseTrue['dataset']{}['time_series_data', 'forecasting', 'statistical_test', 'statsmodels']['regression']
validmind.data_validation.ChiSquaredFeaturesTableChi Squared Features TableAssesses the statistical association between categorical features and a target variable using the Chi-Squared test....FalseTrue['dataset']{'p_threshold': {'type': '_empty', 'default': 0.05}}['tabular_data', 'categorical_data', 'statistical_test']['classification']
validmind.data_validation.ClassImbalanceClass ImbalanceEvaluates and quantifies class distribution imbalance in a dataset used by a machine learning model....TrueTrue['dataset']{'min_percent_threshold': {'type': 'int', 'default': 10}}['tabular_data', 'binary_classification', 'multiclass_classification', 'data_quality']['classification']
validmind.data_validation.DatasetDescriptionDataset DescriptionProvides comprehensive analysis and statistical summaries of each column in a machine learning model's dataset....FalseTrue['dataset']{}['tabular_data', 'time_series_data', 'text_data']['classification', 'regression', 'text_classification', 'text_summarization']
validmind.data_validation.DatasetSplitDataset SplitEvaluates and visualizes the distribution proportions among training, testing, and validation datasets of an ML...FalseTrue['datasets']{}['tabular_data', 'time_series_data', 'text_data']['classification', 'regression', 'text_classification', 'text_summarization']
validmind.data_validation.DescriptiveStatisticsDescriptive StatisticsPerforms a detailed descriptive statistical analysis of both numerical and categorical data within a model's...FalseTrue['dataset']{}['tabular_data', 'time_series_data', 'data_quality']['classification', 'regression']
validmind.data_validation.DickeyFullerGLSDickey Fuller GLSAssesses stationarity in time series data using the Dickey-Fuller GLS test to determine the order of integration....FalseTrue['dataset']{}['time_series_data', 'forecasting', 'unit_root_test']['regression']
validmind.data_validation.DuplicatesDuplicatesTests dataset for duplicate entries, ensuring model reliability via data quality verification....FalseTrue['dataset']{'min_threshold': {'type': '_empty', 'default': 1}}['tabular_data', 'data_quality', 'text_data']['classification', 'regression']
validmind.data_validation.EngleGrangerCointEngle Granger CointAssesses the degree of co-movement between pairs of time series data using the Engle-Granger cointegration test....FalseTrue['dataset']{'threshold': {'type': 'float', 'default': 0.05}}['time_series_data', 'statistical_test', 'forecasting']['regression']
validmind.data_validation.FeatureTargetCorrelationPlotFeature Target Correlation PlotVisualizes the correlation between input features and the model's target output in a color-coded horizontal bar...TrueFalse['dataset']{'fig_height': {'type': '_empty', 'default': 600}}['tabular_data', 'visualization', 'correlation']['classification', 'regression']
validmind.data_validation.HighCardinalityHigh CardinalityAssesses the number of unique values in categorical columns to detect high cardinality and potential overfitting....FalseTrue['dataset']{'num_threshold': {'type': 'int', 'default': 100}, 'percent_threshold': {'type': 'float', 'default': 0.1}, 'threshold_type': {'type': 'str', 'default': 'percent'}}['tabular_data', 'data_quality', 'categorical_data']['classification', 'regression']
validmind.data_validation.HighPearsonCorrelationHigh Pearson CorrelationIdentifies highly correlated feature pairs in a dataset suggesting feature redundancy or multicollinearity....FalseTrue['dataset']{'max_threshold': {'type': 'float', 'default': 0.3}, 'top_n_correlations': {'type': 'int', 'default': 10}, 'feature_columns': {'type': 'list', 'default': None}}['tabular_data', 'data_quality', 'correlation']['classification', 'regression']
validmind.data_validation.IQROutliersBarPlotIQR Outliers Bar PlotVisualizes outlier distribution across percentiles in numerical data using the Interquartile Range (IQR) method....TrueFalse['dataset']{'threshold': {'type': 'float', 'default': 1.5}, 'fig_width': {'type': 'int', 'default': 800}}['tabular_data', 'visualization', 'numerical_data']['classification', 'regression']
validmind.data_validation.IQROutliersTableIQR Outliers TableDetermines and summarizes outliers in numerical features using the Interquartile Range method....FalseTrue['dataset']{'threshold': {'type': 'float', 'default': 1.5}}['tabular_data', 'numerical_data']['classification', 'regression']
validmind.data_validation.IsolationForestOutliersIsolation Forest OutliersDetects outliers in a dataset using the Isolation Forest algorithm and visualizes results through scatter plots....TrueFalse['dataset']{'random_state': {'type': 'int', 'default': 0}, 'contamination': {'type': 'float', 'default': 0.1}, 'feature_columns': {'type': 'list', 'default': None}}['tabular_data', 'anomaly_detection']['classification']
validmind.data_validation.JarqueBeraJarque BeraAssesses normality of dataset features in an ML model using the Jarque-Bera test....FalseTrue['dataset']{}['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.data_validation.KPSSKPSSAssesses the stationarity of time-series data in a machine learning model using the KPSS unit root test....FalseTrue['dataset']{}['time_series_data', 'stationarity', 'unit_root_test', 'statsmodels']['data_validation']
validmind.data_validation.LJungBoxL Jung BoxAssesses autocorrelations in dataset features by performing a Ljung-Box test on each feature....FalseTrue['dataset']{}['time_series_data', 'forecasting', 'statistical_test', 'statsmodels']['regression']
validmind.data_validation.LaggedCorrelationHeatmapLagged Correlation HeatmapAssesses and visualizes correlation between target variable and lagged independent variables in a time-series...TrueFalse['dataset']{'num_lags': {'type': 'int', 'default': 10}}['time_series_data', 'visualization']['regression']
validmind.data_validation.MissingValuesMissing ValuesEvaluates dataset quality by ensuring missing value ratio across all features does not exceed a set threshold....FalseTrue['dataset']{'min_threshold': {'type': 'int', 'default': 1}}['tabular_data', 'data_quality']['classification', 'regression']
validmind.data_validation.MissingValuesBarPlotMissing Values Bar PlotAssesses the percentage and distribution of missing values in the dataset via a bar plot, with emphasis on...TrueFalse['dataset']{'threshold': {'type': 'int', 'default': 80}, 'fig_height': {'type': 'int', 'default': 600}}['tabular_data', 'data_quality', 'visualization']['classification', 'regression']
validmind.data_validation.MutualInformationMutual InformationCalculates mutual information scores between features and target variable to evaluate feature relevance....TrueFalse['dataset']{'min_threshold': {'type': 'float', 'default': 0.01}, 'task': {'type': 'str', 'default': 'classification'}}['feature_selection', 'data_analysis']['classification', 'regression']
validmind.data_validation.PearsonCorrelationMatrixPearson Correlation MatrixEvaluates linear dependency between numerical variables in a dataset via a Pearson Correlation coefficient heat map....TrueFalse['dataset']{}['tabular_data', 'numerical_data', 'correlation']['classification', 'regression']
validmind.data_validation.PhillipsPerronArchPhillips Perron ArchAssesses the stationarity of time series data in each feature of the ML model using the Phillips-Perron test....FalseTrue['dataset']{}['time_series_data', 'forecasting', 'statistical_test', 'unit_root_test']['regression']
validmind.data_validation.ProtectedClassesDescriptionProtected Classes DescriptionVisualizes the distribution of protected classes in the dataset relative to the target variable...TrueTrue['dataset']{'protected_classes': {'type': '_empty', 'default': None}}['bias_and_fairness', 'descriptive_statistics']['classification', 'regression']
validmind.data_validation.RollingStatsPlotRolling Stats PlotEvaluates the stationarity of time series data by plotting its rolling mean and standard deviation over a specified...TrueFalse['dataset']{'window_size': {'type': 'int', 'default': 12}}['time_series_data', 'visualization', 'stationarity']['regression']
validmind.data_validation.RunsTestRuns TestExecutes Runs Test on ML model to detect non-random patterns in output data sequence....FalseTrue['dataset']{}['tabular_data', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.data_validation.ScatterPlotScatter PlotAssesses visual relationships, patterns, and outliers among features in a dataset through scatter plot matrices....TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.ScoreBandDefaultRatesScore Band Default RatesAnalyzes default rates and population distribution across credit score bands....FalseTrue['dataset', 'model']{'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}}['visualization', 'credit_risk', 'scorecard']['classification']
validmind.data_validation.SeasonalDecomposeSeasonal DecomposeAssesses patterns and seasonality in a time series dataset by decomposing its features into foundational components....TrueFalse['dataset']{'seasonal_model': {'type': 'str', 'default': 'additive'}}['time_series_data', 'seasonality', 'statsmodels']['regression']
validmind.data_validation.ShapiroWilkShapiro WilkEvaluates feature-wise normality of training data using the Shapiro-Wilk test....FalseTrue['dataset']{}['tabular_data', 'data_distribution', 'statistical_test']['classification', 'regression']
validmind.data_validation.SkewnessSkewnessEvaluates the skewness of numerical data in a dataset to check against a defined threshold, aiming to ensure data...FalseTrue['dataset']{'max_threshold': {'type': '_empty', 'default': 1}}['data_quality', 'tabular_data']['classification', 'regression']
validmind.data_validation.SpreadPlotSpread PlotAssesses potential correlations between pairs of time series variables through visualization to enhance...TrueFalse['dataset']{}['time_series_data', 'visualization']['regression']
validmind.data_validation.TabularCategoricalBarPlotsTabular Categorical Bar PlotsGenerates and visualizes bar plots for each category in categorical features to evaluate the dataset's composition....TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.TabularDateTimeHistogramsTabular Date Time HistogramsGenerates histograms to provide graphical insight into the distribution of time intervals in a model's datetime...TrueFalse['dataset']{}['time_series_data', 'visualization']['classification', 'regression']
validmind.data_validation.TabularDescriptionTablesTabular Description TablesSummarizes key descriptive statistics for numerical, categorical, and datetime variables in a dataset....FalseTrue['dataset']{}['tabular_data']['classification', 'regression']
validmind.data_validation.TabularNumericalHistogramsTabular Numerical HistogramsGenerates histograms for each numerical feature in a dataset to provide visual insights into data distribution and...TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.TargetRateBarPlotsTarget Rate Bar PlotsGenerates bar plots visualizing the default rates of categorical features for a classification machine learning...TrueFalse['dataset']{}['tabular_data', 'visualization', 'categorical_data']['classification']
validmind.data_validation.TimeSeriesDescriptionTime Series DescriptionGenerates a detailed analysis for the provided time series dataset, summarizing key statistics to identify trends,...FalseTrue['dataset']{}['time_series_data', 'analysis']['regression']
validmind.data_validation.TimeSeriesDescriptiveStatisticsTime Series Descriptive StatisticsEvaluates the descriptive statistics of a time series dataset to identify trends, patterns, and data quality issues....FalseTrue['dataset']{}['time_series_data', 'analysis']['regression']
validmind.data_validation.TimeSeriesFrequencyTime Series FrequencyEvaluates consistency of time series data frequency and generates a frequency plot....TrueTrue['dataset']{}['time_series_data']['regression']
validmind.data_validation.TimeSeriesHistogramTime Series HistogramVisualizes distribution of time-series data using histograms and Kernel Density Estimation (KDE) lines....TrueFalse['dataset']{'nbins': {'type': '_empty', 'default': 30}}['data_validation', 'visualization', 'time_series_data']['regression', 'time_series_forecasting']
validmind.data_validation.TimeSeriesLinePlotTime Series Line PlotGenerates and analyses time-series data through line plots revealing trends, patterns, anomalies over time....TrueFalse['dataset']{}['time_series_data', 'visualization']['regression']
validmind.data_validation.TimeSeriesMissingValuesTime Series Missing ValuesValidates time-series data quality by confirming the count of missing values is below a certain threshold....TrueTrue['dataset']{'min_threshold': {'type': 'int', 'default': 1}}['time_series_data']['regression']
validmind.data_validation.TimeSeriesOutliersTime Series OutliersIdentifies and visualizes outliers in time-series data using the z-score method....FalseTrue['dataset']{'zscore_threshold': {'type': 'int', 'default': 3}}['time_series_data']['regression']
validmind.data_validation.TooManyZeroValuesToo Many Zero ValuesIdentifies numerical columns in a dataset that contain an excessive number of zero values, defined by a threshold...FalseTrue['dataset']{'max_percent_threshold': {'type': 'float', 'default': 0.03}}['tabular_data']['regression', 'classification']
validmind.data_validation.UniqueRowsUnique RowsVerifies the diversity of the dataset by ensuring that the count of unique rows exceeds a prescribed threshold....FalseTrue['dataset']{'min_percent_threshold': {'type': 'float', 'default': 1}}['tabular_data']['regression', 'classification']
validmind.data_validation.WOEBinPlotsWOE Bin PlotsGenerates visualizations of Weight of Evidence (WoE) and Information Value (IV) for understanding predictive power...TrueFalse['dataset']{'breaks_adj': {'type': 'list', 'default': None}, 'fig_height': {'type': 'int', 'default': 600}, 'fig_width': {'type': 'int', 'default': 500}}['tabular_data', 'visualization', 'categorical_data']['classification']
validmind.data_validation.WOEBinTableWOE Bin TableAssesses the Weight of Evidence (WoE) and Information Value (IV) of each feature to evaluate its predictive power...FalseTrue['dataset']{'breaks_adj': {'type': 'list', 'default': None}}['tabular_data', 'categorical_data']['classification']
validmind.data_validation.ZivotAndrewsArchZivot Andrews ArchEvaluates the order of integration and stationarity of time series data using the Zivot-Andrews unit root test....FalseTrue['dataset']{}['time_series_data', 'stationarity', 'unit_root_test']['regression']
validmind.data_validation.nlp.CommonWordsCommon WordsAssesses the most frequent non-stopwords in a text column for identifying prevalent language patterns....TrueFalse['dataset']{}['nlp', 'text_data', 'visualization', 'frequency_analysis']['text_classification', 'text_summarization']
validmind.data_validation.nlp.HashtagsHashtagsAssesses hashtag frequency in a text column, highlighting usage trends and potential dataset bias or spam....TrueFalse['dataset']{'top_hashtags': {'type': 'int', 'default': 25}}['nlp', 'text_data', 'visualization', 'frequency_analysis']['text_classification', 'text_summarization']
validmind.data_validation.nlp.LanguageDetectionLanguage DetectionAssesses the diversity of languages in a textual dataset by detecting and visualizing the distribution of languages....TrueFalse['dataset']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.data_validation.nlp.MentionsMentionsCalculates and visualizes frequencies of '@' prefixed mentions in a text-based dataset for NLP model analysis....TrueFalse['dataset']{'top_mentions': {'type': 'int', 'default': 25}}['nlp', 'text_data', 'visualization', 'frequency_analysis']['text_classification', 'text_summarization']
validmind.data_validation.nlp.PolarityAndSubjectivityPolarity And SubjectivityAnalyzes the polarity and subjectivity of text data within a given dataset to visualize the sentiment distribution....TrueTrue['dataset']{'threshold_subjectivity': {'type': '_empty', 'default': 0.5}, 'threshold_polarity': {'type': '_empty', 'default': 0}}['nlp', 'text_data', 'data_validation']['nlp']
validmind.data_validation.nlp.PunctuationsPunctuationsAnalyzes and visualizes the frequency distribution of punctuation usage in a given text dataset....TrueFalse['dataset']{'count_mode': {'type': '_empty', 'default': 'token'}}['nlp', 'text_data', 'visualization', 'frequency_analysis']['text_classification', 'text_summarization', 'nlp']
validmind.data_validation.nlp.SentimentSentimentAnalyzes the sentiment of text data within a dataset using the VADER sentiment analysis tool....TrueFalse['dataset']{}['nlp', 'text_data', 'data_validation']['nlp']
validmind.data_validation.nlp.StopWordsStop WordsEvaluates and visualizes the frequency of English stop words in a text dataset against a defined threshold....TrueTrue['dataset']{'min_percent_threshold': {'type': 'float', 'default': 0.5}, 'num_words': {'type': 'int', 'default': 25}}['nlp', 'text_data', 'frequency_analysis', 'visualization']['text_classification', 'text_summarization']
validmind.data_validation.nlp.TextDescriptionText DescriptionConducts comprehensive textual analysis on a dataset using NLTK to evaluate various parameters and generate...TrueFalse['dataset']{'unwanted_tokens': {'type': 'set', 'default': {'s', 'mrs', 'us', \"''\", ' ', 'ms', 'dr', 'dollar', '``', 'mr', \"'s\", \"s'\"}}, 'lang': {'type': 'str', 'default': 'english'}}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.data_validation.nlp.ToxicityToxicityAssesses the toxicity of text data within a dataset to visualize the distribution of toxicity scores....TrueFalse['dataset']{}['nlp', 'text_data', 'data_validation']['nlp']
validmind.model_validation.BertScoreBert ScoreAssesses the quality of machine-generated text using BERTScore metrics and visualizes results through histograms...TrueTrue['dataset', 'model']{'evaluation_model': {'type': '_empty', 'default': 'distilbert-base-uncased'}}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.BleuScoreBleu ScoreEvaluates the quality of machine-generated text using BLEU metrics and visualizes the results through histograms...TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.ClusterSizeDistributionCluster Size DistributionAssesses the performance of clustering models by comparing the distribution of cluster sizes in model predictions...TrueFalse['dataset', 'model']{}['sklearn', 'model_performance']['clustering']
validmind.model_validation.ContextualRecallContextual RecallEvaluates a Natural Language Generation model's ability to generate contextually relevant and factually correct...TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.FeaturesAUCFeatures AUCEvaluates the discriminatory power of each individual feature within a binary classification model by calculating...TrueFalse['dataset']{'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}}['feature_importance', 'AUC', 'visualization']['classification']
validmind.model_validation.MeteorScoreMeteor ScoreAssesses the quality of machine-generated translations by comparing them to human-produced references using the...TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.ModelMetadataModel MetadataCompare metadata of different models and generate a summary table with the results....FalseTrue['model']{}['model_training', 'metadata']['regression', 'time_series_forecasting']
validmind.model_validation.ModelPredictionResidualsModel Prediction ResidualsAssesses normality and behavior of residuals in regression models through visualization and statistical tests....TrueTrue['dataset', 'model']{'nbins': {'type': 'int', 'default': 100}, 'p_value_threshold': {'type': 'float', 'default': 0.05}, 'start_date': {'type': None, 'default': None}, 'end_date': {'type': None, 'default': None}}['regression']['residual_analysis', 'visualization']
validmind.model_validation.RegardScoreRegard ScoreAssesses the sentiment and potential biases in text generated by NLP models by computing and visualizing regard...TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.RegressionResidualsPlotRegression Residuals PlotEvaluates regression model performance using residual distribution and actual vs. predicted plots....TrueFalse['model', 'dataset']{'bin_size': {'type': 'float', 'default': 0.1}}['model_performance', 'visualization']['regression']
validmind.model_validation.RougeScoreRouge ScoreAssesses the quality of machine-generated text using ROUGE metrics and visualizes the results to provide...TrueTrue['dataset', 'model']{'metric': {'type': 'str', 'default': 'rouge-1'}}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.TimeSeriesPredictionWithCITime Series Prediction With CIAssesses predictive accuracy and uncertainty in time series models, highlighting breaches beyond confidence...TrueTrue['dataset', 'model']{'confidence': {'type': 'float', 'default': 0.95}}['model_predictions', 'visualization']['regression', 'time_series_forecasting']
validmind.model_validation.TimeSeriesPredictionsPlotTime Series Predictions PlotPlot actual vs predicted values for time series data and generate a visual comparison for the model....TrueFalse['dataset', 'model']{}['model_predictions', 'visualization']['regression', 'time_series_forecasting']
validmind.model_validation.TimeSeriesR2SquareBySegmentsTime Series R2 Square By SegmentsEvaluates the R-Squared values of regression models over specified time segments in time series data to assess...TrueTrue['dataset', 'model']{'segments': {'type': None, 'default': None}}['model_performance', 'sklearn']['regression', 'time_series_forecasting']
validmind.model_validation.TokenDisparityToken DisparityEvaluates the token disparity between reference and generated texts, visualizing the results through histograms and...TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.ToxicityScoreToxicity ScoreAssesses the toxicity levels of texts generated by NLP models to identify and mitigate harmful or offensive content....TrueTrue['dataset', 'model']{}['nlp', 'text_data', 'visualization']['text_classification', 'text_summarization']
validmind.model_validation.embeddings.ClusterDistributionCluster DistributionAssesses the distribution of text embeddings across clusters produced by a model using KMeans clustering....TrueFalse['model', 'dataset']{'num_clusters': {'type': 'int', 'default': 5}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.CosineSimilarityComparisonCosine Similarity ComparisonAssesses the similarity between embeddings generated by different models using Cosine Similarity, providing both...TrueTrue['dataset', 'models']{}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.embeddings.CosineSimilarityDistributionCosine Similarity DistributionAssesses the similarity between predicted text embeddings from a model using a Cosine Similarity distribution...TrueFalse['dataset', 'model']{}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.CosineSimilarityHeatmapCosine Similarity HeatmapGenerates an interactive heatmap to visualize the cosine similarities among embeddings derived from a given model....TrueFalse['dataset', 'model']{'title': {'type': '_empty', 'default': 'Cosine Similarity Matrix'}, 'color': {'type': '_empty', 'default': 'Cosine Similarity'}, 'xaxis_title': {'type': '_empty', 'default': 'Index'}, 'yaxis_title': {'type': '_empty', 'default': 'Index'}, 'color_scale': {'type': '_empty', 'default': 'Blues'}}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.embeddings.DescriptiveAnalyticsDescriptive AnalyticsEvaluates statistical properties of text embeddings in an ML model via mean, median, and standard deviation...TrueFalse['dataset', 'model']{}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.EmbeddingsVisualization2DEmbeddings Visualization2 DVisualizes 2D representation of text embeddings generated by a model using t-SNE technique....TrueFalse['dataset', 'model']{'cluster_column': {'type': None, 'default': None}, 'perplexity': {'type': 'int', 'default': 30}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.EuclideanDistanceComparisonEuclidean Distance ComparisonAssesses and visualizes the dissimilarity between model embeddings using Euclidean distance, providing insights...TrueTrue['dataset', 'models']{}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.embeddings.EuclideanDistanceHeatmapEuclidean Distance HeatmapGenerates an interactive heatmap to visualize the Euclidean distances among embeddings derived from a given model....TrueFalse['dataset', 'model']{'title': {'type': '_empty', 'default': 'Euclidean Distance Matrix'}, 'color': {'type': '_empty', 'default': 'Euclidean Distance'}, 'xaxis_title': {'type': '_empty', 'default': 'Index'}, 'yaxis_title': {'type': '_empty', 'default': 'Index'}, 'color_scale': {'type': '_empty', 'default': 'Blues'}}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.embeddings.PCAComponentsPairwisePlotsPCA Components Pairwise PlotsGenerates scatter plots for pairwise combinations of principal component analysis (PCA) components of model...TrueFalse['dataset', 'model']{'n_components': {'type': 'int', 'default': 3}}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.embeddings.StabilityAnalysisKeywordStability Analysis KeywordEvaluates robustness of embedding models to keyword swaps in the test dataset....TrueTrue['dataset', 'model']{'keyword_dict': {'type': None, 'default': None}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.StabilityAnalysisRandomNoiseStability Analysis Random NoiseAssesses the robustness of text embeddings models to random noise introduced via text perturbations....TrueTrue['dataset', 'model']{'probability': {'type': 'float', 'default': 0.02}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.StabilityAnalysisSynonymsStability Analysis SynonymsEvaluates the stability of text embeddings models when words in test data are replaced by their synonyms randomly....TrueTrue['dataset', 'model']{'probability': {'type': 'float', 'default': 0.02}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.StabilityAnalysisTranslationStability Analysis TranslationEvaluates robustness of text embeddings models to noise introduced by translating the original text to another...TrueTrue['dataset', 'model']{'source_lang': {'type': 'str', 'default': 'en'}, 'target_lang': {'type': 'str', 'default': 'fr'}, 'mean_similarity_threshold': {'type': 'float', 'default': 0.7}}['llm', 'text_data', 'embeddings', 'visualization']['feature_extraction']
validmind.model_validation.embeddings.TSNEComponentsPairwisePlotsTSNE Components Pairwise PlotsCreates scatter plots for pairwise combinations of t-SNE components to visualize embeddings and highlight potential...TrueFalse['dataset', 'model']{'n_components': {'type': 'int', 'default': 2}, 'perplexity': {'type': 'int', 'default': 30}, 'title': {'type': 'str', 'default': 't-SNE'}}['visualization', 'dimensionality_reduction', 'embeddings']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.AnswerCorrectnessAnswer CorrectnessEvaluates the correctness of answers in a dataset with respect to the provided ground...TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.AspectCriticAspect CriticEvaluates generations against the following aspects: harmfulness, maliciousness,...TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': None, 'default': None}, 'aspects': {'type': None, 'default': ['coherence', 'conciseness', 'correctness', 'harmfulness', 'maliciousness']}, 'additional_aspects': {'type': None, 'default': None}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'qualitative']['text_summarization', 'text_generation', 'text_qa']
validmind.model_validation.ragas.ContextEntityRecallContext Entity RecallEvaluates the context entity recall for dataset entries and visualizes the results....TrueTrue['dataset']{'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'retrieval_performance']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.ContextPrecisionContext PrecisionContext Precision is a metric that evaluates whether all of the ground-truth...TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'retrieval_performance']['text_qa', 'text_generation', 'text_summarization', 'text_classification']
validmind.model_validation.ragas.ContextPrecisionWithoutReferenceContext Precision Without ReferenceContext Precision Without Reference is a metric used to evaluate the relevance of...TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'response_column': {'type': 'str', 'default': 'response'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'retrieval_performance']['text_qa', 'text_generation', 'text_summarization', 'text_classification']
validmind.model_validation.ragas.ContextRecallContext RecallContext recall measures the extent to which the retrieved context aligns with the...TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'retrieval_performance']['text_qa', 'text_generation', 'text_summarization', 'text_classification']
validmind.model_validation.ragas.FaithfulnessFaithfulnessEvaluates the faithfulness of the generated answers with respect to retrieved contexts....TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'rag_performance']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.NoiseSensitivityNoise SensitivityAssesses the sensitivity of a Large Language Model (LLM) to noise in retrieved context by measuring how often it...TrueTrue['dataset']{'response_column': {'type': 'str', 'default': 'response'}, 'retrieved_contexts_column': {'type': 'str', 'default': 'retrieved_contexts'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'focus': {'type': 'str', 'default': 'relevant'}, 'user_input_column': {'type': 'str', 'default': 'user_input'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'rag_performance']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.ResponseRelevancyResponse RelevancyAssesses how pertinent the generated answer is to the given prompt....TrueTrue['dataset']{'user_input_column': {'type': 'str', 'default': 'user_input'}, 'retrieved_contexts_column': {'type': 'str', 'default': None}, 'response_column': {'type': 'str', 'default': 'response'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm', 'rag_performance']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.ragas.SemanticSimilaritySemantic SimilarityCalculates the semantic similarity between generated responses and ground truths...TrueTrue['dataset']{'response_column': {'type': 'str', 'default': 'response'}, 'reference_column': {'type': 'str', 'default': 'reference'}, 'judge_llm': {'type': '_empty', 'default': None}, 'judge_embeddings': {'type': '_empty', 'default': None}}['ragas', 'llm']['text_qa', 'text_generation', 'text_summarization']
validmind.model_validation.sklearn.AdjustedMutualInformationAdjusted Mutual InformationEvaluates clustering model performance by measuring mutual information between true and predicted labels, adjusting...FalseTrue['model', 'dataset']{}['sklearn', 'model_performance', 'clustering']['clustering']
validmind.model_validation.sklearn.AdjustedRandIndexAdjusted Rand IndexMeasures the similarity between two data clusters using the Adjusted Rand Index (ARI) metric in clustering machine...FalseTrue['model', 'dataset']{}['sklearn', 'model_performance', 'clustering']['clustering']
validmind.model_validation.sklearn.CalibrationCurveCalibration CurveEvaluates the calibration of probability estimates by comparing predicted probabilities against observed...TrueFalse['model', 'dataset']{'n_bins': {'type': 'int', 'default': 10}}['sklearn', 'model_performance', 'classification']['classification']
validmind.model_validation.sklearn.ClassifierPerformanceClassifier PerformanceEvaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,...FalseTrue['dataset', 'model']{'average': {'type': 'str', 'default': 'macro'}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.ClassifierThresholdOptimizationClassifier Threshold OptimizationAnalyzes and visualizes different threshold optimization methods for binary classification models....FalseTrue['dataset', 'model']{'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}}['model_validation', 'threshold_optimization', 'classification_metrics']['classification']
validmind.model_validation.sklearn.ClusterCosineSimilarityCluster Cosine SimilarityMeasures the intra-cluster similarity of a clustering model using cosine similarity....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance', 'clustering']['clustering']
validmind.model_validation.sklearn.ClusterPerformanceMetricsCluster Performance MetricsEvaluates the performance of clustering machine learning models using multiple established metrics....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance', 'clustering']['clustering']
validmind.model_validation.sklearn.CompletenessScoreCompleteness ScoreEvaluates a clustering model's capacity to categorize instances from a single class into the same cluster....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance', 'clustering']['clustering']
validmind.model_validation.sklearn.ConfusionMatrixConfusion MatrixEvaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix...TrueFalse['dataset', 'model']{'threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.FeatureImportanceFeature ImportanceCompute feature importance scores for a given model and generate a summary table...FalseTrue['dataset', 'model']{'num_features': {'type': 'int', 'default': 3}}['model_explainability', 'sklearn']['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.FowlkesMallowsScoreFowlkes Mallows ScoreEvaluates the similarity between predicted and actual cluster assignments in a model using the Fowlkes-Mallows...FalseTrue['dataset', 'model']{}['sklearn', 'model_performance']['clustering']
validmind.model_validation.sklearn.HomogeneityScoreHomogeneity ScoreAssesses clustering homogeneity by comparing true and predicted labels, scoring from 0 (heterogeneous) to 1...FalseTrue['dataset', 'model']{}['sklearn', 'model_performance']['clustering']
validmind.model_validation.sklearn.HyperParametersTuningHyper Parameters TuningPerforms exhaustive grid search over specified parameter ranges to find optimal model configurations...FalseTrue['model', 'dataset']{'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}}['sklearn', 'model_performance']['clustering', 'classification']
validmind.model_validation.sklearn.KMeansClustersOptimizationK Means Clusters OptimizationOptimizes the number of clusters in K-means models using Elbow and Silhouette methods....TrueFalse['model', 'dataset']{'n_clusters': {'type': None, 'default': None}}['sklearn', 'model_performance', 'kmeans']['clustering']
validmind.model_validation.sklearn.MinimumAccuracyMinimum AccuracyChecks if the model's prediction accuracy meets or surpasses a specified threshold....FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.7}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumF1ScoreMinimum F1 ScoreAssesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced...FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumROCAUCScoreMinimum ROCAUC ScoreValidates model by checking if the ROC AUC score meets or surpasses a specified threshold....FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.ModelParametersModel ParametersExtracts and displays model parameters in a structured format for transparency and reproducibility....FalseTrue['model']{'model_params': {'type': None, 'default': None}}['model_training', 'metadata']['classification', 'regression']
validmind.model_validation.sklearn.ModelsPerformanceComparisonModels Performance ComparisonEvaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,...FalseTrue['dataset', 'models']{}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison']['classification', 'text_classification']
validmind.model_validation.sklearn.OverfitDiagnosisOverfit DiagnosisAssesses potential overfitting in a model's predictions, identifying regions where performance between training and...TrueTrue['model', 'datasets']{'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}}['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis']['classification', 'regression']
validmind.model_validation.sklearn.PermutationFeatureImportancePermutation Feature ImportanceAssesses the significance of each feature in a model by evaluating the impact on model performance when feature...TrueFalse['model', 'dataset']{'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.PopulationStabilityIndexPopulation Stability IndexAssesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across...TrueTrue['datasets', 'model']{'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurvePrecision Recall CurveEvaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve....TrueFalse['model', 'dataset']{}['sklearn', 'binary_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurveROC CurveEvaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic...TrueFalse['model', 'dataset']{}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.RegressionErrorsRegression ErrorsAssesses the performance and error distribution of a regression model using various error metrics....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance']['regression', 'classification']
validmind.model_validation.sklearn.RegressionErrorsComparisonRegression Errors ComparisonAssesses multiple regression error metrics to compare model performance across different datasets, emphasizing...FalseTrue['datasets', 'models']{}['model_performance', 'sklearn']['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.RegressionPerformanceRegression PerformanceEvaluates the performance of a regression model using five different metrics: MAE, MSE, RMSE, MAPE, and MBD....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance']['regression']
validmind.model_validation.sklearn.RegressionR2SquareRegression R2 SquareAssesses the overall goodness-of-fit of a regression model by evaluating R-squared (R2) and Adjusted R-squared (Adj...FalseTrue['dataset', 'model']{}['sklearn', 'model_performance']['regression']
validmind.model_validation.sklearn.RegressionR2SquareComparisonRegression R2 Square ComparisonCompares R-Squared and Adjusted R-Squared values for different regression models across multiple datasets to assess...FalseTrue['datasets', 'models']{}['model_performance', 'sklearn']['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.RobustnessDiagnosisRobustness DiagnosisAssesses the robustness of a machine learning model by evaluating performance decay under noisy conditions....TrueTrue['datasets', 'model']{'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}}['sklearn', 'model_diagnosis', 'visualization']['classification', 'regression']
validmind.model_validation.sklearn.SHAPGlobalImportanceSHAP Global ImportanceEvaluates and visualizes global feature importance using SHAP values for model explanation and risk identification....FalseTrue['model', 'dataset']{'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.ScoreProbabilityAlignmentScore Probability AlignmentAnalyzes the alignment between credit scores and predicted probabilities....TrueTrue['model', 'dataset']{'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}}['visualization', 'credit_risk', 'calibration']['classification']
validmind.model_validation.sklearn.SilhouettePlotSilhouette PlotCalculates and visualizes Silhouette Score, assessing the degree of data point suitability to its cluster in ML...TrueTrue['model', 'dataset']{}['sklearn', 'model_performance']['clustering']
validmind.model_validation.sklearn.TrainingTestDegradationTraining Test DegradationTests if model performance degradation between training and test datasets exceeds a predefined threshold....FalseTrue['datasets', 'model']{'max_threshold': {'type': 'float', 'default': 0.1}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.VMeasureV MeasureEvaluates homogeneity and completeness of a clustering model using the V Measure Score....FalseTrue['dataset', 'model']{}['sklearn', 'model_performance']['clustering']
validmind.model_validation.sklearn.WeakspotsDiagnosisWeakspots DiagnosisIdentifies and visualizes weak spots in a machine learning model's performance across various sections of the...TrueTrue['datasets', 'model']{'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization']['classification', 'text_classification']
validmind.model_validation.statsmodels.AutoARIMAAuto ARIMAEvaluates ARIMA models for time-series forecasting, ranking them using Bayesian and Akaike Information Criteria....FalseTrue['model', 'dataset']{}['time_series_data', 'forecasting', 'model_selection', 'statsmodels']['regression']
validmind.model_validation.statsmodels.CumulativePredictionProbabilitiesCumulative Prediction ProbabilitiesVisualizes cumulative probabilities of positive and negative classes for both training and testing in classification models....TrueFalse['dataset', 'model']{'title': {'type': 'str', 'default': 'Cumulative Probabilities'}}['visualization', 'credit_risk']['classification']
validmind.model_validation.statsmodels.DurbinWatsonTestDurbin Watson TestAssesses autocorrelation in time series data features using the Durbin-Watson statistic....FalseTrue['dataset', 'model']{'threshold': {'type': None, 'default': [1.5, 2.5]}}['time_series_data', 'forecasting', 'statistical_test', 'statsmodels']['regression']
validmind.model_validation.statsmodels.GINITableGINI TableEvaluates classification model performance using AUC, GINI, and KS metrics for training and test datasets....FalseTrue['dataset', 'model']{}['model_performance']['classification']
validmind.model_validation.statsmodels.KolmogorovSmirnovKolmogorov SmirnovAssesses whether each feature in the dataset aligns with a normal distribution using the Kolmogorov-Smirnov test....FalseTrue['model', 'dataset']{'dist': {'type': 'str', 'default': 'norm'}}['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.model_validation.statsmodels.LillieforsLillieforsAssesses the normality of feature distributions in an ML model's training dataset using the Lilliefors test....FalseTrue['dataset']{}['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.model_validation.statsmodels.PredictionProbabilitiesHistogramPrediction Probabilities HistogramAssesses the predictive probability distribution for binary classification to evaluate model performance and...TrueFalse['dataset', 'model']{'title': {'type': 'str', 'default': 'Histogram of Predictive Probabilities'}}['visualization', 'credit_risk']['classification']
validmind.model_validation.statsmodels.RegressionCoeffsRegression CoeffsAssesses the significance and uncertainty of predictor variables in a regression model through visualization of...TrueTrue['model']{}['tabular_data', 'visualization', 'model_training']['regression']
validmind.model_validation.statsmodels.RegressionFeatureSignificanceRegression Feature SignificanceAssesses and visualizes the statistical significance of features in a regression model....TrueFalse['model']{'fontsize': {'type': 'int', 'default': 10}, 'p_threshold': {'type': 'float', 'default': 0.05}}['statistical_test', 'model_interpretation', 'visualization', 'feature_importance']['regression']
validmind.model_validation.statsmodels.RegressionModelForecastPlotRegression Model Forecast PlotGenerates plots to visually compare the forecasted outcomes of a regression model against actual observed values over...TrueFalse['model', 'dataset']{'start_date': {'type': None, 'default': None}, 'end_date': {'type': None, 'default': None}}['time_series_data', 'forecasting', 'visualization']['regression']
validmind.model_validation.statsmodels.RegressionModelForecastPlotLevelsRegression Model Forecast Plot LevelsAssesses the alignment between forecasted and observed values in regression models through visual plots...TrueFalse['model', 'dataset']{}['time_series_data', 'forecasting', 'visualization']['regression']
validmind.model_validation.statsmodels.RegressionModelSensitivityPlotRegression Model Sensitivity PlotAssesses the sensitivity of a regression model to changes in independent variables by applying shocks and...TrueFalse['dataset', 'model']{'shocks': {'type': None, 'default': [0.1]}, 'transformation': {'type': None, 'default': None}}['senstivity_analysis', 'visualization']['regression']
validmind.model_validation.statsmodels.RegressionModelSummaryRegression Model SummaryEvaluates regression model performance using metrics including R-Squared, Adjusted R-Squared, MSE, and RMSE....FalseTrue['dataset', 'model']{}['model_performance', 'regression']['regression']
validmind.model_validation.statsmodels.RegressionPermutationFeatureImportanceRegression Permutation Feature ImportanceAssesses the significance of each feature in a model by evaluating the impact on model performance when feature...TrueFalse['dataset', 'model']{'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}}['statsmodels', 'feature_importance', 'visualization']['regression']
validmind.model_validation.statsmodels.ScorecardHistogramScorecard HistogramThe Scorecard Histogram test evaluates the distribution of credit scores between default and non-default instances,...TrueFalse['dataset']{'title': {'type': 'str', 'default': 'Histogram of Scores'}, 'score_column': {'type': 'str', 'default': 'score'}}['visualization', 'credit_risk', 'logistic_regression']['classification']
validmind.ongoing_monitoring.CalibrationCurveDriftCalibration Curve DriftEvaluates changes in probability calibration between reference and monitoring datasets....TrueTrue['datasets', 'model']{'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}}['sklearn', 'binary_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.ongoing_monitoring.ClassDiscriminationDriftClass Discrimination DriftCompares classification discrimination metrics between reference and monitoring datasets....FalseTrue['datasets', 'model']{'drift_pct_threshold': {'type': '_empty', 'default': 20}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.ongoing_monitoring.ClassImbalanceDriftClass Imbalance DriftEvaluates drift in class distribution between reference and monitoring datasets....TrueTrue['datasets']{'drift_pct_threshold': {'type': 'float', 'default': 5.0}, 'title': {'type': 'str', 'default': 'Class Distribution Drift'}}['tabular_data', 'binary_classification', 'multiclass_classification']['classification']
validmind.ongoing_monitoring.ClassificationAccuracyDriftClassification Accuracy DriftCompares classification accuracy metrics between reference and monitoring datasets....FalseTrue['datasets', 'model']{'drift_pct_threshold': {'type': '_empty', 'default': 20}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.ongoing_monitoring.ConfusionMatrixDriftConfusion Matrix DriftCompares confusion matrix metrics between reference and monitoring datasets....FalseTrue['datasets', 'model']{'drift_pct_threshold': {'type': '_empty', 'default': 20}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.ongoing_monitoring.CumulativePredictionProbabilitiesDriftCumulative Prediction Probabilities DriftCompares cumulative prediction probability distributions between reference and monitoring datasets....TrueFalse['datasets', 'model']{}['visualization', 'credit_risk']['classification']
validmind.ongoing_monitoring.FeatureDriftFeature DriftEvaluates changes in feature distribution over time to identify potential model drift....TrueTrue['datasets']{'bins': {'type': '_empty', 'default': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]}, 'feature_columns': {'type': '_empty', 'default': None}, 'psi_threshold': {'type': '_empty', 'default': 0.2}}['visualization']['monitoring']
validmind.ongoing_monitoring.PredictionAcrossEachFeaturePrediction Across Each FeatureAssesses differences in model predictions across individual features between reference and monitoring datasets...TrueFalse['datasets', 'model']{}['visualization']['monitoring']
validmind.ongoing_monitoring.PredictionCorrelationPrediction CorrelationAssesses correlation changes between model predictions from reference and monitoring datasets to detect potential...TrueTrue['datasets', 'model']{'drift_pct_threshold': {'type': 'float', 'default': 20}}['visualization']['monitoring']
validmind.ongoing_monitoring.PredictionProbabilitiesHistogramDriftPrediction Probabilities Histogram DriftCompares prediction probability distributions between reference and monitoring datasets....TrueTrue['datasets', 'model']{'title': {'type': '_empty', 'default': 'Prediction Probabilities Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk']['classification']
validmind.ongoing_monitoring.PredictionQuantilesAcrossFeaturesPrediction Quantiles Across FeaturesAssesses differences in model prediction distributions across individual features between reference...TrueFalse['datasets', 'model']{}['visualization']['monitoring']
validmind.ongoing_monitoring.ROCCurveDriftROC Curve DriftCompares ROC curves between reference and monitoring datasets....TrueFalse['datasets', 'model']{}['sklearn', 'binary_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.ongoing_monitoring.ScoreBandsDriftScore Bands DriftAnalyzes drift in population distribution and default rates across score bands....FalseTrue['datasets', 'model']{'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}, 'drift_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk', 'scorecard']['classification']
validmind.ongoing_monitoring.ScorecardHistogramDriftScorecard Histogram DriftCompares score distributions between reference and monitoring datasets for each class....TrueTrue['datasets']{'score_column': {'type': 'str', 'default': 'score'}, 'title': {'type': 'str', 'default': 'Scorecard Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}}['visualization', 'credit_risk', 'logistic_regression']['classification']
validmind.ongoing_monitoring.TargetPredictionDistributionPlotTarget Prediction Distribution PlotAssesses differences in prediction distributions between a reference dataset and a monitoring dataset to identify...TrueTrue['datasets', 'model']{'drift_pct_threshold': {'type': 'float', 'default': 20}}['visualization']['monitoring']
validmind.prompt_validation.BiasBiasAssesses potential bias in a Large Language Model by analyzing the distribution and order of exemplars in the...FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.ClarityClarityEvaluates and scores the clarity of prompts in a Large Language Model based on specified guidelines....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.ConcisenessConcisenessAnalyzes and grades the conciseness of prompts provided to a Large Language Model....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.DelimitationDelimitationEvaluates the proper use of delimiters in prompts provided to Large Language Models....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.NegativeInstructionNegative InstructionEvaluates and grades the use of affirmative, proactive language over negative instructions in LLM prompts....FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.RobustnessRobustnessAssesses the robustness of prompts provided to a Large Language Model under varying conditions and contexts. This test...FalseTrue['model', 'dataset']{'num_tests': {'type': '_empty', 'default': 10}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.prompt_validation.SpecificitySpecificityEvaluates and scores the specificity of prompts provided to a Large Language Model (LLM), based on clarity, detail,...FalseTrue['model']{'min_threshold': {'type': '_empty', 'default': 7}, 'judge_llm': {'type': '_empty', 'default': None}}['llm', 'zero_shot', 'few_shot']['text_classification', 'text_summarization']
validmind.unit_metrics.classification.AccuracyAccuracyCalculates the accuracy of a modelFalseFalse['dataset', 'model']{}['classification']['classification']
validmind.unit_metrics.classification.F1F1Calculates the F1 score for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.PrecisionPrecisionCalculates the precision for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.ROC_AUCROC AUCCalculates the ROC AUC for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.classification.RecallRecallCalculates the recall for a classification model.FalseFalse['model', 'dataset']{}['classification']['classification']
validmind.unit_metrics.regression.AdjustedRSquaredScoreAdjusted R Squared ScoreCalculates the adjusted R-squared score for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.GiniCoefficientGini CoefficientCalculates the Gini coefficient for a regression model.FalseFalse['dataset', 'model']{}['regression']['regression']
validmind.unit_metrics.regression.HuberLossHuber LossCalculates the Huber loss for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.KolmogorovSmirnovStatisticKolmogorov Smirnov StatisticCalculates the Kolmogorov-Smirnov statistic for a regression model.FalseFalse['dataset', 'model']{}['regression']['regression']
validmind.unit_metrics.regression.MeanAbsoluteErrorMean Absolute ErrorCalculates the mean absolute error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanAbsolutePercentageErrorMean Absolute Percentage ErrorCalculates the mean absolute percentage error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanBiasDeviationMean Bias DeviationCalculates the mean bias deviation for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.MeanSquaredErrorMean Squared ErrorCalculates the mean squared error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.QuantileLossQuantile LossCalculates the quantile loss for a regression model.FalseFalse['model', 'dataset']{'quantile': {'type': '_empty', 'default': 0.5}}['regression']['regression']
validmind.unit_metrics.regression.RSquaredScoreR Squared ScoreCalculates the R-squared score for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
validmind.unit_metrics.regression.RootMeanSquaredErrorRoot Mean Squared ErrorCalculates the root mean squared error for a regression model.FalseFalse['model', 'dataset']{}['regression']['regression']
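The catalog above is long to scan by eye. As covered later in this notebook, [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) accepts `filter`, `task`, and `tags` parameters, so you can narrow the output instead of reading the full table. A minimal sketch (passing `tags` as a list of tag strings is an assumption here):

```python
from validmind.tests import list_tests

# Keyword match across test IDs, names, and descriptions:
list_tests(filter="sklearn")

# Narrow by task type, then by tags (list of tag strings assumed):
list_tests(task="classification", tags=["model_performance", "sklearn"])
```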
\n" ], - "source": [ - "list_tags()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, to match each task type with its related tags, use the [list_tasks_and_tags()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks_and_tags) function:" + "text/plain": [ + "" ] - }, + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tests()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Understand tags and task types\n", + "\n", + "Use [list_tasks()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks) to view all unique task types used to classify tests in the ValidMind Library.\n", + "\n", + "Understanding `task` types helps you filter tests that match your model’s objective. For example:\n", + "\n", + "- **classification:** Works with Classification Models and Datasets.\n", + "- **regression:** Works with Regression Models and Datasets.\n", + "- **text classification:** Works with Text Classification Models and Datasets.\n", + "- **text summarization:** Works with Text Summarization Models and Datasets." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| Task | Tags |
| --- | --- |
| regression | senstivity_analysis, tabular_data, time_series_data, model_predictions, feature_selection, correlation, regression, statsmodels, model_performance, model_training, multiclass_classification, linear_regression, data_quality, text_data, model_explainability, binary_classification, stationarity, bias_and_fairness, numerical_data, sklearn, model_selection, statistical_test, descriptive_statistics, seasonality, analysis, data_validation, data_distribution, metadata, feature_importance, visualization, forecasting, model_diagnosis, model_interpretation, unit_root_test, categorical_data, data_analysis |
| classification | calibration, anomaly_detection, classification_metrics, tabular_data, time_series_data, feature_selection, correlation, statsmodels, model_performance, model_validation, model_training, classification, multiclass_classification, linear_regression, data_quality, text_data, binary_classification, threshold_optimization, bias_and_fairness, scorecard, model_comparison, numerical_data, sklearn, statistical_test, descriptive_statistics, feature_importance, data_distribution, metadata, visualization, credit_risk, AUC, logistic_regression, model_diagnosis, categorical_data, data_analysis |
| text_classification | model_performance, feature_importance, multiclass_classification, few_shot, frequency_analysis, zero_shot, text_data, visualization, llm, binary_classification, ragas, model_diagnosis, model_comparison, sklearn, nlp, retrieval_performance, tabular_data, time_series_data |
| text_summarization | qualitative, few_shot, frequency_analysis, embeddings, zero_shot, text_data, visualization, llm, rag_performance, ragas, retrieval_performance, nlp, dimensionality_reduction, tabular_data, time_series_data |
| data_validation | stationarity, statsmodels, unit_root_test, time_series_data |
| time_series_forecasting | model_training, data_validation, metadata, visualization, model_explainability, sklearn, model_performance, model_predictions, time_series_data |
| nlp | data_validation, frequency_analysis, text_data, visualization, nlp |
| clustering | clustering, model_performance, kmeans, sklearn |
| residual_analysis | regression |
| visualization | regression |
| feature_extraction | embeddings, text_data, visualization, llm |
| text_qa | qualitative, embeddings, visualization, llm, rag_performance, ragas, dimensionality_reduction, retrieval_performance |
| text_generation | qualitative, embeddings, visualization, llm, rag_performance, ragas, dimensionality_reduction, retrieval_performance |
| monitoring | visualization |
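Each row above pairs a task type with the tags that appear on its tests, so you can read a task/tag combination straight off the table and query for exactly those tests. A short sketch using the `monitoring` row, relying on the `task` and `tags` parameters described in the filtering section below:

```python
from validmind.tests import list_tests

# From the table above: the `monitoring` task pairs with the `visualization` tag.
list_tests(task="monitoring", tags=["visualization"])
```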
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "list_tasks_and_tags()" + "data": { + "text/plain": [ + "['text_qa',\n", + " 'classification',\n", + " 'data_validation',\n", + " 'text_classification',\n", + " 'feature_extraction',\n", + " 'regression',\n", + " 'visualization',\n", + " 'clustering',\n", + " 'time_series_forecasting',\n", + " 'text_summarization',\n", + " 'nlp',\n", + " 'residual_analysis',\n", + " 'monitoring',\n", + " 'text_generation']" ] - }, + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tasks()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use [list_tags()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tags) to view all unique tags used to describe tests in the ValidMind Library.\n", + "\n", + "`Tags` describe what a test applies to and help you filter tests for your use case. Examples include:\n", + "\n", + "- **llm:** Tests that work with Large Language Models.\n", + "- **nlp:** Tests relevant for natural language processing.\n", + "- **binary_classification:** Tests for binary classification tasks.\n", + "- **forecasting:** Tests for forecasting and time-series analysis.\n", + "- **tabular_data:** Tests for tabular data like CSVs and Excel spreadsheets.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Filter tests by tags and task types\n", - "\n", - "While listing all tests is useful, you’ll often want to narrow your search. The [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) function supports `filter`, `task`, and `tags` parameters to assist in refining your results.\n", - "\n", - "Use the `filter` parameter to find tests that match a specific keyword, such as `sklearn`:\n" + "data": { + "text/plain": [ + "['senstivity_analysis',\n", + " 'calibration',\n", + " 'clustering',\n", + " 'anomaly_detection',\n", + " 'nlp',\n", + " 'classification_metrics',\n", + " 'dimensionality_reduction',\n", + " 'tabular_data',\n", + " 'time_series_data',\n", + " 'model_predictions',\n", + " 'feature_selection',\n", + " 'correlation',\n", + " 'frequency_analysis',\n", + " 'embeddings',\n", + " 'regression',\n", + " 'llm',\n", + " 'statsmodels',\n", + " 'ragas',\n", + " 'model_performance',\n", + " 'model_validation',\n", + " 'rag_performance',\n", + " 'model_training',\n", + " 'qualitative',\n", + " 'classification',\n", + " 'kmeans',\n", + " 'multiclass_classification',\n", + " 'linear_regression',\n", + " 'data_quality',\n", + " 'text_data',\n", + " 'binary_classification',\n", + " 'threshold_optimization',\n", + " 'stationarity',\n", + " 'bias_and_fairness',\n", + " 'scorecard',\n", + " 'model_explainability',\n", + " 'model_comparison',\n", + " 'numerical_data',\n", + " 'sklearn',\n", + " 'model_selection',\n", + " 'retrieval_performance',\n", + " 'zero_shot',\n", + " 'statistical_test',\n", + " 'descriptive_statistics',\n", + " 'seasonality',\n", + " 'analysis',\n", + " 'data_validation',\n", + " 'data_distribution',\n", + " 'feature_importance',\n", + " 'metadata',\n", + " 'few_shot',\n", + " 'visualization',\n", + " 'credit_risk',\n", + " 'forecasting',\n", + " 'AUC',\n", + " 'logistic_regression',\n", + " 'model_diagnosis',\n", + " 'model_interpretation',\n", + " 'unit_root_test',\n", + " 
'categorical_data',\n", + " 'data_analysis']" ] - }, + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tags()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, to match each task type with its related tags, use the [list_tasks_and_tags()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks_and_tags) function:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks |
|---|---|---|---|---|---|---|---|---|
| validmind.model_validation.ClusterSizeDistribution | Cluster Size Distribution | Assesses the performance of clustering models by comparing the distribution of cluster sizes in model predictions... | True | False | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.TimeSeriesR2SquareBySegments | Time Series R2 Square By Segments | Evaluates the R-Squared values of regression models over specified time segments in time series data to assess... | True | True | ['dataset', 'model'] | {'segments': {'type': None, 'default': None}} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.AdjustedMutualInformation | Adjusted Mutual Information | Evaluates clustering model performance by measuring mutual information between true and predicted labels, adjusting... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.AdjustedRandIndex | Adjusted Rand Index | Measures the similarity between two data clusters using the Adjusted Rand Index (ARI) metric in clustering machine... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.CalibrationCurve | Calibration Curve | Evaluates the calibration of probability estimates by comparing predicted probabilities against observed... | True | False | ['model', 'dataset'] | {'n_bins': {'type': 'int', 'default': 10}} | ['sklearn', 'model_performance', 'classification'] | ['classification'] |
| validmind.model_validation.sklearn.ClassifierPerformance | Classifier Performance | Evaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,... | False | True | ['dataset', 'model'] | {'average': {'type': 'str', 'default': 'macro'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ClassifierThresholdOptimization | Classifier Threshold Optimization | Analyzes and visualizes different threshold optimization methods for binary classification models.... | False | True | ['dataset', 'model'] | {'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}} | ['model_validation', 'threshold_optimization', 'classification_metrics'] | ['classification'] |
| validmind.model_validation.sklearn.ClusterCosineSimilarity | Cluster Cosine Similarity | Measures the intra-cluster similarity of a clustering model using cosine similarity.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.ClusterPerformanceMetrics | Cluster Performance Metrics | Evaluates the performance of clustering machine learning models using multiple established metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.CompletenessScore | Completeness Score | Evaluates a clustering model's capacity to categorize instances from a single class into the same cluster.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering'] |
| validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.FeatureImportance | Feature Importance | Compute feature importance scores for a given model and generate a summary table... | False | True | ['dataset', 'model'] | {'num_features': {'type': 'int', 'default': 3}} | ['model_explainability', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.FowlkesMallowsScore | Fowlkes Mallows Score | Evaluates the similarity between predicted and actual cluster assignments in a model using the Fowlkes-Mallows... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.HomogeneityScore | Homogeneity Score | Assesses clustering homogeneity by comparing true and predicted labels, scoring from 0 (heterogeneous) to 1... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.HyperParametersTuning | Hyper Parameters Tuning | Performs exhaustive grid search over specified parameter ranges to find optimal model configurations... | False | True | ['model', 'dataset'] | {'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}} | ['sklearn', 'model_performance'] | ['clustering', 'classification'] |
| validmind.model_validation.sklearn.KMeansClustersOptimization | K Means Clusters Optimization | Optimizes the number of clusters in K-means models using Elbow and Silhouette methods.... | True | False | ['model', 'dataset'] | {'n_clusters': {'type': None, 'default': None}} | ['sklearn', 'model_performance', 'kmeans'] | ['clustering'] |
| validmind.model_validation.sklearn.MinimumAccuracy | Minimum Accuracy | Checks if the model's prediction accuracy meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.7}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumF1Score | Minimum F1 Score | Assesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumROCAUCScore | Minimum ROCAUC Score | Validates model by checking if the ROC AUC score meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ModelParameters | Model Parameters | Extracts and displays model parameters in a structured format for transparency and reproducibility.... | False | True | ['model'] | {'model_params': {'type': None, 'default': None}} | ['model_training', 'metadata'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.ModelsPerformanceComparison | Models Performance Comparison | Evaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,... | False | True | ['dataset', 'models'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.OverfitDiagnosis | Overfit Diagnosis | Assesses potential overfitting in a model's predictions, identifying regions where performance between training and... | True | True | ['model', 'datasets'] | {'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.PermutationFeatureImportance | Permutation Feature Importance | Assesses the significance of each feature in a model by evaluating the impact on model performance when feature... | True | False | ['model', 'dataset'] | {'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PopulationStabilityIndex | Population Stability Index | Assesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across... | True | True | ['datasets', 'model'] | {'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.RegressionErrors | Regression Errors | Assesses the performance and error distribution of a regression model using various error metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression', 'classification'] |
| validmind.model_validation.sklearn.RegressionErrorsComparison | Regression Errors Comparison | Assesses multiple regression error metrics to compare model performance across different datasets, emphasizing... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.RegressionPerformance | Regression Performance | Evaluates the performance of a regression model using five different metrics: MAE, MSE, RMSE, MAPE, and MBD.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression'] |
| validmind.model_validation.sklearn.RegressionR2Square | Regression R2 Square | Assesses the overall goodness-of-fit of a regression model by evaluating R-squared (R2) and Adjusted R-squared (Adj... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['regression'] |
| validmind.model_validation.sklearn.RegressionR2SquareComparison | Regression R2 Square Comparison | Compares R-Squared and Adjusted R-Squared values for different regression models across multiple datasets to assess... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting'] |
| validmind.model_validation.sklearn.RobustnessDiagnosis | Robustness Diagnosis | Assesses the robustness of a machine learning model by evaluating performance decay under noisy conditions.... | True | True | ['datasets', 'model'] | {'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}} | ['sklearn', 'model_diagnosis', 'visualization'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.SHAPGlobalImportance | SHAP Global Importance | Evaluates and visualizes global feature importance using SHAP values for model explanation and risk identification.... | False | True | ['model', 'dataset'] | {'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ScoreProbabilityAlignment | Score Probability Alignment | Analyzes the alignment between credit scores and predicted probabilities.... | True | True | ['model', 'dataset'] | {'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}} | ['visualization', 'credit_risk', 'calibration'] | ['classification'] |
| validmind.model_validation.sklearn.SilhouettePlot | Silhouette Plot | Calculates and visualizes Silhouette Score, assessing the degree of data point suitability to its cluster in ML... | True | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.VMeasure | V Measure | Evaluates homogeneity and completeness of a clustering model using the V Measure Score.... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering'] |
| validmind.model_validation.sklearn.WeakspotsDiagnosis | Weakspots Diagnosis | Identifies and visualizes weak spots in a machine learning model's performance across various sections of the... | True | True | ['datasets', 'model'] | {'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassDiscriminationDrift | Class Discrimination Drift | Compares classification discrimination metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassificationAccuracyDrift | Classification Accuracy Drift | Compares classification accuracy metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ConfusionMatrixDrift | Confusion Matrix Drift | Compares confusion matrix metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
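The table is handy for browsing, but you may want the matching test IDs as plain Python data, for example to loop over them or hand them to other functions. A minimal sketch, assuming `list_tests()` accepts a `pretty` argument that switches off the formatted table output (check the linked API reference for the exact signature in your installed version):

```python
from validmind.tests import list_tests

# Assumption: pretty=False returns a plain list of test ID strings
# rather than the formatted table shown above.
sklearn_test_ids = list_tests(filter="sklearn", pretty=False)

# Quick sanity check: count the matches and print a few IDs.
print(f"Found {len(sklearn_test_ids)} sklearn-related tests")
for test_id in sklearn_test_ids[:5]:
    print(test_id)  # e.g. validmind.model_validation.sklearn.ROCCurve
```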
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
| Task | Tags |
|---|---|
| regression | senstivity_analysis, tabular_data, time_series_data, model_predictions, feature_selection, correlation, regression, statsmodels, model_performance, model_training, multiclass_classification, linear_regression, data_quality, text_data, model_explainability, binary_classification, stationarity, bias_and_fairness, numerical_data, sklearn, model_selection, statistical_test, descriptive_statistics, seasonality, analysis, data_validation, data_distribution, metadata, feature_importance, visualization, forecasting, model_diagnosis, model_interpretation, unit_root_test, categorical_data, data_analysis |
| classification | calibration, anomaly_detection, classification_metrics, tabular_data, time_series_data, feature_selection, correlation, statsmodels, model_performance, model_validation, model_training, classification, multiclass_classification, linear_regression, data_quality, text_data, binary_classification, threshold_optimization, bias_and_fairness, scorecard, model_comparison, numerical_data, sklearn, statistical_test, descriptive_statistics, feature_importance, data_distribution, metadata, visualization, credit_risk, AUC, logistic_regression, model_diagnosis, categorical_data, data_analysis |
| text_classification | model_performance, feature_importance, multiclass_classification, few_shot, frequency_analysis, zero_shot, text_data, visualization, llm, binary_classification, ragas, model_diagnosis, model_comparison, sklearn, nlp, retrieval_performance, tabular_data, time_series_data |
| text_summarization | qualitative, few_shot, frequency_analysis, embeddings, zero_shot, text_data, visualization, llm, rag_performance, ragas, retrieval_performance, nlp, dimensionality_reduction, tabular_data, time_series_data |
| data_validation | stationarity, statsmodels, unit_root_test, time_series_data |
| time_series_forecasting | model_training, data_validation, metadata, visualization, model_explainability, sklearn, model_performance, model_predictions, time_series_data |
| nlp | data_validation, frequency_analysis, text_data, visualization, nlp |
| clustering | clustering, model_performance, kmeans, sklearn |
| residual_analysis | regression |
| visualization | regression |
| feature_extraction | embeddings, text_data, visualization, llm |
| text_qa | qualitative, embeddings, visualization, llm, rag_performance, ragas, dimensionality_reduction, retrieval_performance |
| text_generation | qualitative, embeddings, visualization, llm, rag_performance, ragas, dimensionality_reduction, retrieval_performance |
| monitoring | visualization |
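Because this mapping shows which tags co-occur with each task, you can use it to pick tag values that make sense for your task type. A minimal sketch, assuming the `task` and `tags` parameters of `list_tests()` can be combined in a single call, with a test returned only when it matches the task type and carries every tag listed:

```python
from validmind.tests import list_tests

# Assumption: combining task and tags narrows the results to tests that
# match the task type AND are tagged with every entry in the tags list.
list_tests(task="classification", tags=["sklearn", "model_performance"])
```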
\n" ], - "source": [ - "list_tests(filter=\"sklearn\")" + "text/plain": [ + "" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use the `task` parameter to find tests that match a specific task type, such as `classification`:\n", - "\n", - "\n" - ] - }, + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tasks_and_tags()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Filter tests by tags and task types\n", + "\n", + "While listing all tests is useful, you’ll often want to narrow your search. The [list_tests()](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) function supports `filter`, `task`, and `tags` parameters to assist in refining your results.\n", + "\n", + "Use the `filter` parameter to find tests that match a specific keyword, such as `sklearn`:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " 
\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
| ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks |
|---|---|---|---|---|---|---|---|---|
| validmind.data_validation.BivariateScatterPlots | Bivariate Scatter Plots | Generates bivariate scatterplots to visually inspect relationships between pairs of numerical predictor variables... | True | False | ['dataset'] | {} | ['tabular_data', 'numerical_data', 'visualization'] | ['classification'] |
| validmind.data_validation.ChiSquaredFeaturesTable | Chi Squared Features Table | Assesses the statistical association between categorical features and a target variable using the Chi-Squared test.... | False | True | ['dataset'] | {'p_threshold': {'type': '_empty', 'default': 0.05}} | ['tabular_data', 'categorical_data', 'statistical_test'] | ['classification'] |
| validmind.data_validation.ClassImbalance | Class Imbalance | Evaluates and quantifies class distribution imbalance in a dataset used by a machine learning model.... | True | True | ['dataset'] | {'min_percent_threshold': {'type': 'int', 'default': 10}} | ['tabular_data', 'binary_classification', 'multiclass_classification', 'data_quality'] | ['classification'] |
| validmind.data_validation.DatasetDescription | Dataset Description | Provides comprehensive analysis and statistical summaries of each column in a machine learning model's dataset.... | False | True | ['dataset'] | {} | ['tabular_data', 'time_series_data', 'text_data'] | ['classification', 'regression', 'text_classification', 'text_summarization'] |
| validmind.data_validation.DatasetSplit | Dataset Split | Evaluates and visualizes the distribution proportions among training, testing, and validation datasets of an ML... | False | True | ['datasets'] | {} | ['tabular_data', 'time_series_data', 'text_data'] | ['classification', 'regression', 'text_classification', 'text_summarization'] |
| validmind.data_validation.DescriptiveStatistics | Descriptive Statistics | Performs a detailed descriptive statistical analysis of both numerical and categorical data within a model's... | False | True | ['dataset'] | {} | ['tabular_data', 'time_series_data', 'data_quality'] | ['classification', 'regression'] |
| validmind.data_validation.Duplicates | Duplicates | Tests dataset for duplicate entries, ensuring model reliability via data quality verification.... | False | True | ['dataset'] | {'min_threshold': {'type': '_empty', 'default': 1}} | ['tabular_data', 'data_quality', 'text_data'] | ['classification', 'regression'] |
| validmind.data_validation.FeatureTargetCorrelationPlot | Feature Target Correlation Plot | Visualizes the correlation between input features and the model's target output in a color-coded horizontal bar... | True | False | ['dataset'] | {'fig_height': {'type': '_empty', 'default': 600}} | ['tabular_data', 'visualization', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.HighCardinality | High Cardinality | Assesses the number of unique values in categorical columns to detect high cardinality and potential overfitting.... | False | True | ['dataset'] | {'num_threshold': {'type': 'int', 'default': 100}, 'percent_threshold': {'type': 'float', 'default': 0.1}, 'threshold_type': {'type': 'str', 'default': 'percent'}} | ['tabular_data', 'data_quality', 'categorical_data'] | ['classification', 'regression'] |
| validmind.data_validation.HighPearsonCorrelation | High Pearson Correlation | Identifies highly correlated feature pairs in a dataset suggesting feature redundancy or multicollinearity.... | False | True | ['dataset'] | {'max_threshold': {'type': 'float', 'default': 0.3}, 'top_n_correlations': {'type': 'int', 'default': 10}, 'feature_columns': {'type': 'list', 'default': None}} | ['tabular_data', 'data_quality', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.IQROutliersBarPlot | IQR Outliers Bar Plot | Visualizes outlier distribution across percentiles in numerical data using the Interquartile Range (IQR) method.... | True | False | ['dataset'] | {'threshold': {'type': 'float', 'default': 1.5}, 'fig_width': {'type': 'int', 'default': 800}} | ['tabular_data', 'visualization', 'numerical_data'] | ['classification', 'regression'] |
| validmind.data_validation.IQROutliersTable | IQR Outliers Table | Determines and summarizes outliers in numerical features using the Interquartile Range method.... | False | True | ['dataset'] | {'threshold': {'type': 'float', 'default': 1.5}} | ['tabular_data', 'numerical_data'] | ['classification', 'regression'] |
| validmind.data_validation.IsolationForestOutliers | Isolation Forest Outliers | Detects outliers in a dataset using the Isolation Forest algorithm and visualizes results through scatter plots.... | True | False | ['dataset'] | {'random_state': {'type': 'int', 'default': 0}, 'contamination': {'type': 'float', 'default': 0.1}, 'feature_columns': {'type': 'list', 'default': None}} | ['tabular_data', 'anomaly_detection'] | ['classification'] |
| validmind.data_validation.JarqueBera | Jarque Bera | Assesses normality of dataset features in an ML model using the Jarque-Bera test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.data_validation.MissingValues | Missing Values | Evaluates dataset quality by ensuring missing value ratio across all features does not exceed a set threshold.... | False | True | ['dataset'] | {'min_threshold': {'type': 'int', 'default': 1}} | ['tabular_data', 'data_quality'] | ['classification', 'regression'] |
| validmind.data_validation.MissingValuesBarPlot | Missing Values Bar Plot | Assesses the percentage and distribution of missing values in the dataset via a bar plot, with emphasis on... | True | False | ['dataset'] | {'threshold': {'type': 'int', 'default': 80}, 'fig_height': {'type': 'int', 'default': 600}} | ['tabular_data', 'data_quality', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.MutualInformation | Mutual Information | Calculates mutual information scores between features and target variable to evaluate feature relevance.... | True | False | ['dataset'] | {'min_threshold': {'type': 'float', 'default': 0.01}, 'task': {'type': 'str', 'default': 'classification'}} | ['feature_selection', 'data_analysis'] | ['classification', 'regression'] |
| validmind.data_validation.PearsonCorrelationMatrix | Pearson Correlation Matrix | Evaluates linear dependency between numerical variables in a dataset via a Pearson Correlation coefficient heat map.... | True | False | ['dataset'] | {} | ['tabular_data', 'numerical_data', 'correlation'] | ['classification', 'regression'] |
| validmind.data_validation.ProtectedClassesDescription | Protected Classes Description | Visualizes the distribution of protected classes in the dataset relative to the target variable... | True | True | ['dataset'] | {'protected_classes': {'type': '_empty', 'default': None}} | ['bias_and_fairness', 'descriptive_statistics'] | ['classification', 'regression'] |
| validmind.data_validation.RunsTest | Runs Test | Executes Runs Test on ML model to detect non-random patterns in output data sequence.... | False | True | ['dataset'] | {} | ['tabular_data', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.data_validation.ScatterPlot | Scatter Plot | Assesses visual relationships, patterns, and outliers among features in a dataset through scatter plot matrices.... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.ScoreBandDefaultRates | Score Band Default Rates | Analyzes default rates and population distribution across credit score bands.... | False | True | ['dataset', 'model'] | {'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}} | ['visualization', 'credit_risk', 'scorecard'] | ['classification'] |
| validmind.data_validation.ShapiroWilk | Shapiro Wilk | Evaluates feature-wise normality of training data using the Shapiro-Wilk test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test'] | ['classification', 'regression'] |
| validmind.data_validation.Skewness | Skewness | Evaluates the skewness of numerical data in a dataset to check against a defined threshold, aiming to ensure data... | False | True | ['dataset'] | {'max_threshold': {'type': '_empty', 'default': 1}} | ['data_quality', 'tabular_data'] | ['classification', 'regression'] |
| validmind.data_validation.TabularCategoricalBarPlots | Tabular Categorical Bar Plots | Generates and visualizes bar plots for each category in categorical features to evaluate the dataset's composition.... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TabularDateTimeHistograms | Tabular Date Time Histograms | Generates histograms to provide graphical insight into the distribution of time intervals in a model's datetime... | True | False | ['dataset'] | {} | ['time_series_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TabularDescriptionTables | Tabular Description Tables | Summarizes key descriptive statistics for numerical, categorical, and datetime variables in a dataset.... | False | True | ['dataset'] | {} | ['tabular_data'] | ['classification', 'regression'] |
| validmind.data_validation.TabularNumericalHistograms | Tabular Numerical Histograms | Generates histograms for each numerical feature in a dataset to provide visual insights into data distribution and... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization'] | ['classification', 'regression'] |
| validmind.data_validation.TargetRateBarPlots | Target Rate Bar Plots | Generates bar plots visualizing the default rates of categorical features for a classification machine learning... | True | False | ['dataset'] | {} | ['tabular_data', 'visualization', 'categorical_data'] | ['classification'] |
| validmind.data_validation.TooManyZeroValues | Too Many Zero Values | Identifies numerical columns in a dataset that contain an excessive number of zero values, defined by a threshold... | False | True | ['dataset'] | {'max_percent_threshold': {'type': 'float', 'default': 0.03}} | ['tabular_data'] | ['regression', 'classification'] |
| validmind.data_validation.UniqueRows | Unique Rows | Verifies the diversity of the dataset by ensuring that the count of unique rows exceeds a prescribed threshold.... | False | True | ['dataset'] | {'min_percent_threshold': {'type': 'float', 'default': 1}} | ['tabular_data'] | ['regression', 'classification'] |
| validmind.data_validation.WOEBinPlots | WOE Bin Plots | Generates visualizations of Weight of Evidence (WoE) and Information Value (IV) for understanding predictive power... | True | False | ['dataset'] | {'breaks_adj': {'type': 'list', 'default': None}, 'fig_height': {'type': 'int', 'default': 600}, 'fig_width': {'type': 'int', 'default': 500}} | ['tabular_data', 'visualization', 'categorical_data'] | ['classification'] |
| validmind.data_validation.WOEBinTable | WOE Bin Table | Assesses the Weight of Evidence (WoE) and Information Value (IV) of each feature to evaluate its predictive power... | False | True | ['dataset'] | {'breaks_adj': {'type': 'list', 'default': None}} | ['tabular_data', 'categorical_data'] | ['classification'] |
| validmind.model_validation.FeaturesAUC | Features AUC | Evaluates the discriminatory power of each individual feature within a binary classification model by calculating... | True | False | ['dataset'] | {'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}} | ['feature_importance', 'AUC', 'visualization'] | ['classification'] |
| validmind.model_validation.sklearn.CalibrationCurve | Calibration Curve | Evaluates the calibration of probability estimates by comparing predicted probabilities against observed... | True | False | ['model', 'dataset'] | {'n_bins': {'type': 'int', 'default': 10}} | ['sklearn', 'model_performance', 'classification'] | ['classification'] |
| validmind.model_validation.sklearn.ClassifierPerformance | Classifier Performance | Evaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,... | False | True | ['dataset', 'model'] | {'average': {'type': 'str', 'default': 'macro'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ClassifierThresholdOptimization | Classifier Threshold Optimization | Analyzes and visualizes different threshold optimization methods for binary classification models.... | False | True | ['dataset', 'model'] | {'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}} | ['model_validation', 'threshold_optimization', 'classification_metrics'] | ['classification'] |
| validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.HyperParametersTuning | Hyper Parameters Tuning | Performs exhaustive grid search over specified parameter ranges to find optimal model configurations... | False | True | ['model', 'dataset'] | {'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}} | ['sklearn', 'model_performance'] | ['clustering', 'classification'] |
| validmind.model_validation.sklearn.MinimumAccuracy | Minimum Accuracy | Checks if the model's prediction accuracy meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.7}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumF1Score | Minimum F1 Score | Assesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.MinimumROCAUCScore | Minimum ROCAUC Score | Validates model by checking if the ROC AUC score meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ModelParameters | Model Parameters | Extracts and displays model parameters in a structured format for transparency and reproducibility.... | False | True | ['model'] | {'model_params': {'type': None, 'default': None}} | ['model_training', 'metadata'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.ModelsPerformanceComparison | Models Performance Comparison | Evaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,... | False | True | ['dataset', 'models'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.OverfitDiagnosis | Overfit Diagnosis | Assesses potential overfitting in a model's predictions, identifying regions where performance between training and... | True | True | ['model', 'datasets'] | {'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.PermutationFeatureImportance | Permutation Feature Importance | Assesses the significance of each feature in a model by evaluating the impact on model performance when feature... | True | False | ['model', 'dataset'] | {'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PopulationStabilityIndex | Population Stability Index | Assesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across... | True | True | ['datasets', 'model'] | {'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.RegressionErrors | Regression Errors | Assesses the performance and error distribution of a regression model using various error metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression', 'classification'] |
| validmind.model_validation.sklearn.RobustnessDiagnosis | Robustness Diagnosis | Assesses the robustness of a machine learning model by evaluating performance decay under noisy conditions.... | True | True | ['datasets', 'model'] | {'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}} | ['sklearn', 'model_diagnosis', 'visualization'] | ['classification', 'regression'] |
| validmind.model_validation.sklearn.SHAPGlobalImportance | SHAP Global Importance | Evaluates and visualizes global feature importance using SHAP values for model explanation and risk identification.... | False | True | ['model', 'dataset'] | {'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.ScoreProbabilityAlignment | Score Probability Alignment | Analyzes the alignment between credit scores and predicted probabilities.... | True | True | ['model', 'dataset'] | {'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}} | ['visualization', 'credit_risk', 'calibration'] | ['classification'] |
| validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.sklearn.WeakspotsDiagnosis | Weakspots Diagnosis | Identifies and visualizes weak spots in a machine learning model's performance across various sections of the... | True | True | ['datasets', 'model'] | {'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization'] | ['classification', 'text_classification'] |
| validmind.model_validation.statsmodels.CumulativePredictionProbabilities | Cumulative Prediction Probabilities | Visualizes cumulative probabilities of positive and negative classes for both training and testing in classification models.... | True | False | ['dataset', 'model'] | {'title': {'type': 'str', 'default': 'Cumulative Probabilities'}} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.model_validation.statsmodels.GINITable | GINI Table | Evaluates classification model performance using AUC, GINI, and KS metrics for training and test datasets.... | False | True | ['dataset', 'model'] | {} | ['model_performance'] | ['classification'] |
| validmind.model_validation.statsmodels.KolmogorovSmirnov | Kolmogorov Smirnov | Assesses whether each feature in the dataset aligns with a normal distribution using the Kolmogorov-Smirnov test.... | False | True | ['model', 'dataset'] | {'dist': {'type': 'str', 'default': 'norm'}} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.model_validation.statsmodels.Lilliefors | Lilliefors | Assesses the normality of feature distributions in an ML model's training dataset using the Lilliefors test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression'] |
| validmind.model_validation.statsmodels.PredictionProbabilitiesHistogram | Prediction Probabilities Histogram | Assesses the predictive probability distribution for binary classification to evaluate model performance and... | True | False | ['dataset', 'model'] | {'title': {'type': 'str', 'default': 'Histogram of Predictive Probabilities'}} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.model_validation.statsmodels.ScorecardHistogram | Scorecard Histogram | The Scorecard Histogram test evaluates the distribution of credit scores between default and non-default instances,... | True | False | ['dataset'] | {'title': {'type': 'str', 'default': 'Histogram of Scores'}, 'score_column': {'type': 'str', 'default': 'score'}} | ['visualization', 'credit_risk', 'logistic_regression'] | ['classification'] |
| validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassDiscriminationDrift | Class Discrimination Drift | Compares classification discrimination metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ClassImbalanceDrift | Class Imbalance Drift | Evaluates drift in class distribution between reference and monitoring datasets.... | True | True | ['datasets'] | {'drift_pct_threshold': {'type': 'float', 'default': 5.0}, 'title': {'type': 'str', 'default': 'Class Distribution Drift'}} | ['tabular_data', 'binary_classification', 'multiclass_classification'] | ['classification'] |
| validmind.ongoing_monitoring.ClassificationAccuracyDrift | Classification Accuracy Drift | Compares classification accuracy metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ConfusionMatrixDrift | Confusion Matrix Drift | Compares confusion matrix metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.CumulativePredictionProbabilitiesDrift | Cumulative Prediction Probabilities Drift | Compares cumulative prediction probability distributions between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.ongoing_monitoring.PredictionProbabilitiesHistogramDrift | Prediction Probabilities Histogram Drift | Compares prediction probability distributions between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'title': {'type': '_empty', 'default': 'Prediction Probabilities Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk'] | ['classification'] |
| validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification'] |
| validmind.ongoing_monitoring.ScoreBandsDrift | Score Bands Drift | Analyzes drift in population distribution and default rates across score bands.... | False | True | ['datasets', 'model'] | {'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}, 'drift_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk', 'scorecard'] | ['classification'] |
| validmind.ongoing_monitoring.ScorecardHistogramDrift | Scorecard Histogram Drift | Compares score distributions between reference and monitoring datasets for each class.... | True | True | ['datasets'] | {'score_column': {'type': 'str', 'default': 'score'}, 'title': {'type': 'str', 'default': 'Scorecard Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk', 'logistic_regression'] | ['classification'] |
| validmind.unit_metrics.classification.Accuracy | Accuracy | Calculates the accuracy of a model | False | False | ['dataset', 'model'] | {} | ['classification'] | ['classification'] |
| validmind.unit_metrics.classification.F1 | F1 | Calculates the F1 score for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification'] |
| validmind.unit_metrics.classification.Precision | Precision | Calculates the precision for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification'] |
| validmind.unit_metrics.classification.ROC_AUC | ROC AUC | Calculates the ROC AUC for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification'] |
| validmind.unit_metrics.classification.Recall | Recall | Calculates the recall for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification'] |
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "execution_count": 7,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
+     "data": {
+      "text/html": [
+       [dataframe <style> block and empty spacer cells omitted]
ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks
validmind.model_validation.ClusterSizeDistribution | Cluster Size Distribution | Assesses the performance of clustering models by comparing the distribution of cluster sizes in model predictions... | True | False | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering']
validmind.model_validation.TimeSeriesR2SquareBySegments | Time Series R2 Square By Segments | Evaluates the R-Squared values of regression models over specified time segments in time series data to assess... | True | True | ['dataset', 'model'] | {'segments': {'type': None, 'default': None}} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.AdjustedMutualInformation | Adjusted Mutual Information | Evaluates clustering model performance by measuring mutual information between true and predicted labels, adjusting... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering']
validmind.model_validation.sklearn.AdjustedRandIndex | Adjusted Rand Index | Measures the similarity between two data clusters using the Adjusted Rand Index (ARI) metric in clustering machine... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering']
validmind.model_validation.sklearn.CalibrationCurve | Calibration Curve | Evaluates the calibration of probability estimates by comparing predicted probabilities against observed... | True | False | ['model', 'dataset'] | {'n_bins': {'type': 'int', 'default': 10}} | ['sklearn', 'model_performance', 'classification'] | ['classification']
validmind.model_validation.sklearn.ClassifierPerformance | Classifier Performance | Evaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,... | False | True | ['dataset', 'model'] | {'average': {'type': 'str', 'default': 'macro'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ClassifierThresholdOptimization | Classifier Threshold Optimization | Analyzes and visualizes different threshold optimization methods for binary classification models.... | False | True | ['dataset', 'model'] | {'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}} | ['model_validation', 'threshold_optimization', 'classification_metrics'] | ['classification']
validmind.model_validation.sklearn.ClusterCosineSimilarity | Cluster Cosine Similarity | Measures the intra-cluster similarity of a clustering model using cosine similarity.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering']
validmind.model_validation.sklearn.ClusterPerformanceMetrics | Cluster Performance Metrics | Evaluates the performance of clustering machine learning models using multiple established metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering']
validmind.model_validation.sklearn.CompletenessScore | Completeness Score | Evaluates a clustering model's capacity to categorize instances from a single class into the same cluster.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance', 'clustering'] | ['clustering']
validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.FeatureImportance | Feature Importance | Compute feature importance scores for a given model and generate a summary table... | False | True | ['dataset', 'model'] | {'num_features': {'type': 'int', 'default': 3}} | ['model_explainability', 'sklearn'] | ['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.FowlkesMallowsScore | Fowlkes Mallows Score | Evaluates the similarity between predicted and actual cluster assignments in a model using the Fowlkes-Mallows... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering']
validmind.model_validation.sklearn.HomogeneityScore | Homogeneity Score | Assesses clustering homogeneity by comparing true and predicted labels, scoring from 0 (heterogeneous) to 1... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering']
validmind.model_validation.sklearn.HyperParametersTuning | Hyper Parameters Tuning | Performs exhaustive grid search over specified parameter ranges to find optimal model configurations... | False | True | ['model', 'dataset'] | {'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}} | ['sklearn', 'model_performance'] | ['clustering', 'classification']
validmind.model_validation.sklearn.KMeansClustersOptimization | K Means Clusters Optimization | Optimizes the number of clusters in K-means models using Elbow and Silhouette methods.... | True | False | ['model', 'dataset'] | {'n_clusters': {'type': None, 'default': None}} | ['sklearn', 'model_performance', 'kmeans'] | ['clustering']
validmind.model_validation.sklearn.MinimumAccuracy | Minimum Accuracy | Checks if the model's prediction accuracy meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.7}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumF1Score | Minimum F1 Score | Assesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumROCAUCScore | Minimum ROCAUC Score | Validates model by checking if the ROC AUC score meets or surpasses a specified threshold.... | False | True | ['dataset', 'model'] | {'min_threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ModelParameters | Model Parameters | Extracts and displays model parameters in a structured format for transparency and reproducibility.... | False | True | ['model'] | {'model_params': {'type': None, 'default': None}} | ['model_training', 'metadata'] | ['classification', 'regression']
validmind.model_validation.sklearn.ModelsPerformanceComparison | Models Performance Comparison | Evaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,... | False | True | ['dataset', 'models'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.OverfitDiagnosis | Overfit Diagnosis | Assesses potential overfitting in a model's predictions, identifying regions where performance between training and... | True | True | ['model', 'datasets'] | {'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis'] | ['classification', 'regression']
validmind.model_validation.sklearn.PermutationFeatureImportance | Permutation Feature Importance | Assesses the significance of each feature in a model by evaluating the impact on model performance when feature... | True | False | ['model', 'dataset'] | {'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PopulationStabilityIndex | Population Stability Index | Assesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across... | True | True | ['datasets', 'model'] | {'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.RegressionErrors | Regression Errors | Assesses the performance and error distribution of a regression model using various error metrics.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression', 'classification']
validmind.model_validation.sklearn.RegressionErrorsComparison | Regression Errors Comparison | Assesses multiple regression error metrics to compare model performance across different datasets, emphasizing... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.RegressionPerformance | Regression Performance | Evaluates the performance of a regression model using five different metrics: MAE, MSE, RMSE, MAPE, and MBD.... | False | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['regression']
validmind.model_validation.sklearn.RegressionR2Square | Regression R2 Square | Assesses the overall goodness-of-fit of a regression model by evaluating R-squared (R2) and Adjusted R-squared (Adj... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['regression']
validmind.model_validation.sklearn.RegressionR2SquareComparison | Regression R2 Square Comparison | Compares R-Squared and Adjusted R-Squared values for different regression models across multiple datasets to assess... | False | True | ['datasets', 'models'] | {} | ['model_performance', 'sklearn'] | ['regression', 'time_series_forecasting']
validmind.model_validation.sklearn.RobustnessDiagnosis | Robustness Diagnosis | Assesses the robustness of a machine learning model by evaluating performance decay under noisy conditions.... | True | True | ['datasets', 'model'] | {'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}} | ['sklearn', 'model_diagnosis', 'visualization'] | ['classification', 'regression']
validmind.model_validation.sklearn.SHAPGlobalImportance | SHAP Global Importance | Evaluates and visualizes global feature importance using SHAP values for model explanation and risk identification.... | False | True | ['model', 'dataset'] | {'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ScoreProbabilityAlignment | Score Probability Alignment | Analyzes the alignment between credit scores and predicted probabilities.... | True | True | ['model', 'dataset'] | {'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}} | ['visualization', 'credit_risk', 'calibration'] | ['classification']
validmind.model_validation.sklearn.SilhouettePlot | Silhouette Plot | Calculates and visualizes Silhouette Score, assessing the degree of data point suitability to its cluster in ML... | True | True | ['model', 'dataset'] | {} | ['sklearn', 'model_performance'] | ['clustering']
validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.VMeasure | V Measure | Evaluates homogeneity and completeness of a clustering model using the V Measure Score.... | False | True | ['dataset', 'model'] | {} | ['sklearn', 'model_performance'] | ['clustering']
validmind.model_validation.sklearn.WeakspotsDiagnosis | Weakspots Diagnosis | Identifies and visualizes weak spots in a machine learning model's performance across various sections of the... | True | True | ['datasets', 'model'] | {'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ClassDiscriminationDrift | Class Discrimination Drift | Compares classification discrimination metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ClassificationAccuracyDrift | Classification Accuracy Drift | Compares classification accuracy metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ConfusionMatrixDrift | Confusion Matrix Drift | Compares confusion matrix metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
\n" ], - "source": [ - "list_tests(task=\"classification\")" + "text/plain": [ + "" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use the `tags` parameter to find tests based on their tags, such as `model_performance` or `visualization`:" - ] - }, + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tests(filter=\"sklearn\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the `task` parameter to find tests that match a specific task type, such as `classification`:\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks
validmind.model_validation.RegressionResidualsPlot | Regression Residuals Plot | Evaluates regression model performance using residual distribution and actual vs. predicted plots.... | True | False | ['model', 'dataset'] | {'bin_size': {'type': 'float', 'default': 0.1}} | ['model_performance', 'visualization'] | ['regression']
validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
+       [dataframe <style> block and empty spacer cells omitted]
IDNameDescriptionHas FigureHas TableRequired InputsParamsTagsTasks
validmind.data_validation.BivariateScatterPlotsBivariate Scatter PlotsGenerates bivariate scatterplots to visually inspect relationships between pairs of numerical predictor variables...TrueFalse['dataset']{}['tabular_data', 'numerical_data', 'visualization']['classification']
validmind.data_validation.ChiSquaredFeaturesTableChi Squared Features TableAssesses the statistical association between categorical features and a target variable using the Chi-Squared test....FalseTrue['dataset']{'p_threshold': {'type': '_empty', 'default': 0.05}}['tabular_data', 'categorical_data', 'statistical_test']['classification']
validmind.data_validation.ClassImbalanceClass ImbalanceEvaluates and quantifies class distribution imbalance in a dataset used by a machine learning model....TrueTrue['dataset']{'min_percent_threshold': {'type': 'int', 'default': 10}}['tabular_data', 'binary_classification', 'multiclass_classification', 'data_quality']['classification']
validmind.data_validation.DatasetDescriptionDataset DescriptionProvides comprehensive analysis and statistical summaries of each column in a machine learning model's dataset....FalseTrue['dataset']{}['tabular_data', 'time_series_data', 'text_data']['classification', 'regression', 'text_classification', 'text_summarization']
validmind.data_validation.DatasetSplitDataset SplitEvaluates and visualizes the distribution proportions among training, testing, and validation datasets of an ML...FalseTrue['datasets']{}['tabular_data', 'time_series_data', 'text_data']['classification', 'regression', 'text_classification', 'text_summarization']
validmind.data_validation.DescriptiveStatisticsDescriptive StatisticsPerforms a detailed descriptive statistical analysis of both numerical and categorical data within a model's...FalseTrue['dataset']{}['tabular_data', 'time_series_data', 'data_quality']['classification', 'regression']
validmind.data_validation.DuplicatesDuplicatesTests dataset for duplicate entries, ensuring model reliability via data quality verification....FalseTrue['dataset']{'min_threshold': {'type': '_empty', 'default': 1}}['tabular_data', 'data_quality', 'text_data']['classification', 'regression']
validmind.data_validation.FeatureTargetCorrelationPlotFeature Target Correlation PlotVisualizes the correlation between input features and the model's target output in a color-coded horizontal bar...TrueFalse['dataset']{'fig_height': {'type': '_empty', 'default': 600}}['tabular_data', 'visualization', 'correlation']['classification', 'regression']
validmind.data_validation.HighCardinalityHigh CardinalityAssesses the number of unique values in categorical columns to detect high cardinality and potential overfitting....FalseTrue['dataset']{'num_threshold': {'type': 'int', 'default': 100}, 'percent_threshold': {'type': 'float', 'default': 0.1}, 'threshold_type': {'type': 'str', 'default': 'percent'}}['tabular_data', 'data_quality', 'categorical_data']['classification', 'regression']
validmind.data_validation.HighPearsonCorrelationHigh Pearson CorrelationIdentifies highly correlated feature pairs in a dataset suggesting feature redundancy or multicollinearity....FalseTrue['dataset']{'max_threshold': {'type': 'float', 'default': 0.3}, 'top_n_correlations': {'type': 'int', 'default': 10}, 'feature_columns': {'type': 'list', 'default': None}}['tabular_data', 'data_quality', 'correlation']['classification', 'regression']
validmind.data_validation.IQROutliersBarPlotIQR Outliers Bar PlotVisualizes outlier distribution across percentiles in numerical data using the Interquartile Range (IQR) method....TrueFalse['dataset']{'threshold': {'type': 'float', 'default': 1.5}, 'fig_width': {'type': 'int', 'default': 800}}['tabular_data', 'visualization', 'numerical_data']['classification', 'regression']
validmind.data_validation.IQROutliersTableIQR Outliers TableDetermines and summarizes outliers in numerical features using the Interquartile Range method....FalseTrue['dataset']{'threshold': {'type': 'float', 'default': 1.5}}['tabular_data', 'numerical_data']['classification', 'regression']
validmind.data_validation.IsolationForestOutliersIsolation Forest OutliersDetects outliers in a dataset using the Isolation Forest algorithm and visualizes results through scatter plots....TrueFalse['dataset']{'random_state': {'type': 'int', 'default': 0}, 'contamination': {'type': 'float', 'default': 0.1}, 'feature_columns': {'type': 'list', 'default': None}}['tabular_data', 'anomaly_detection']['classification']
validmind.data_validation.JarqueBeraJarque BeraAssesses normality of dataset features in an ML model using the Jarque-Bera test....FalseTrue['dataset']{}['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.data_validation.MissingValuesMissing ValuesEvaluates dataset quality by ensuring missing value ratio across all features does not exceed a set threshold....FalseTrue['dataset']{'min_threshold': {'type': 'int', 'default': 1}}['tabular_data', 'data_quality']['classification', 'regression']
validmind.data_validation.MissingValuesBarPlotMissing Values Bar PlotAssesses the percentage and distribution of missing values in the dataset via a bar plot, with emphasis on...TrueFalse['dataset']{'threshold': {'type': 'int', 'default': 80}, 'fig_height': {'type': 'int', 'default': 600}}['tabular_data', 'data_quality', 'visualization']['classification', 'regression']
validmind.data_validation.MutualInformationMutual InformationCalculates mutual information scores between features and target variable to evaluate feature relevance....TrueFalse['dataset']{'min_threshold': {'type': 'float', 'default': 0.01}, 'task': {'type': 'str', 'default': 'classification'}}['feature_selection', 'data_analysis']['classification', 'regression']
validmind.data_validation.PearsonCorrelationMatrixPearson Correlation MatrixEvaluates linear dependency between numerical variables in a dataset via a Pearson Correlation coefficient heat map....TrueFalse['dataset']{}['tabular_data', 'numerical_data', 'correlation']['classification', 'regression']
validmind.data_validation.ProtectedClassesDescriptionProtected Classes DescriptionVisualizes the distribution of protected classes in the dataset relative to the target variable...TrueTrue['dataset']{'protected_classes': {'type': '_empty', 'default': None}}['bias_and_fairness', 'descriptive_statistics']['classification', 'regression']
validmind.data_validation.RunsTestRuns TestExecutes Runs Test on ML model to detect non-random patterns in output data sequence....FalseTrue['dataset']{}['tabular_data', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.data_validation.ScatterPlotScatter PlotAssesses visual relationships, patterns, and outliers among features in a dataset through scatter plot matrices....TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.ScoreBandDefaultRatesScore Band Default RatesAnalyzes default rates and population distribution across credit score bands....FalseTrue['dataset', 'model']{'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}}['visualization', 'credit_risk', 'scorecard']['classification']
validmind.data_validation.ShapiroWilkShapiro WilkEvaluates feature-wise normality of training data using the Shapiro-Wilk test....FalseTrue['dataset']{}['tabular_data', 'data_distribution', 'statistical_test']['classification', 'regression']
validmind.data_validation.SkewnessSkewnessEvaluates the skewness of numerical data in a dataset to check against a defined threshold, aiming to ensure data...FalseTrue['dataset']{'max_threshold': {'type': '_empty', 'default': 1}}['data_quality', 'tabular_data']['classification', 'regression']
validmind.data_validation.TabularCategoricalBarPlotsTabular Categorical Bar PlotsGenerates and visualizes bar plots for each category in categorical features to evaluate the dataset's composition....TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.TabularDateTimeHistogramsTabular Date Time HistogramsGenerates histograms to provide graphical insight into the distribution of time intervals in a model's datetime...TrueFalse['dataset']{}['time_series_data', 'visualization']['classification', 'regression']
validmind.data_validation.TabularDescriptionTablesTabular Description TablesSummarizes key descriptive statistics for numerical, categorical, and datetime variables in a dataset....FalseTrue['dataset']{}['tabular_data']['classification', 'regression']
validmind.data_validation.TabularNumericalHistogramsTabular Numerical HistogramsGenerates histograms for each numerical feature in a dataset to provide visual insights into data distribution and...TrueFalse['dataset']{}['tabular_data', 'visualization']['classification', 'regression']
validmind.data_validation.TargetRateBarPlotsTarget Rate Bar PlotsGenerates bar plots visualizing the default rates of categorical features for a classification machine learning...TrueFalse['dataset']{}['tabular_data', 'visualization', 'categorical_data']['classification']
validmind.data_validation.TooManyZeroValuesToo Many Zero ValuesIdentifies numerical columns in a dataset that contain an excessive number of zero values, defined by a threshold...FalseTrue['dataset']{'max_percent_threshold': {'type': 'float', 'default': 0.03}}['tabular_data']['regression', 'classification']
validmind.data_validation.UniqueRowsUnique RowsVerifies the diversity of the dataset by ensuring that the count of unique rows exceeds a prescribed threshold....FalseTrue['dataset']{'min_percent_threshold': {'type': 'float', 'default': 1}}['tabular_data']['regression', 'classification']
validmind.data_validation.WOEBinPlotsWOE Bin PlotsGenerates visualizations of Weight of Evidence (WoE) and Information Value (IV) for understanding predictive power...TrueFalse['dataset']{'breaks_adj': {'type': 'list', 'default': None}, 'fig_height': {'type': 'int', 'default': 600}, 'fig_width': {'type': 'int', 'default': 500}}['tabular_data', 'visualization', 'categorical_data']['classification']
validmind.data_validation.WOEBinTableWOE Bin TableAssesses the Weight of Evidence (WoE) and Information Value (IV) of each feature to evaluate its predictive power...FalseTrue['dataset']{'breaks_adj': {'type': 'list', 'default': None}}['tabular_data', 'categorical_data']['classification']
validmind.model_validation.FeaturesAUCFeatures AUCEvaluates the discriminatory power of each individual feature within a binary classification model by calculating...TrueFalse['dataset']{'fontsize': {'type': 'int', 'default': 12}, 'figure_height': {'type': 'int', 'default': 500}}['feature_importance', 'AUC', 'visualization']['classification']
validmind.model_validation.sklearn.CalibrationCurveCalibration CurveEvaluates the calibration of probability estimates by comparing predicted probabilities against observed...TrueFalse['model', 'dataset']{'n_bins': {'type': 'int', 'default': 10}}['sklearn', 'model_performance', 'classification']['classification']
validmind.model_validation.sklearn.ClassifierPerformanceClassifier PerformanceEvaluates performance of binary or multiclass classification models using precision, recall, F1-Score, accuracy,...FalseTrue['dataset', 'model']{'average': {'type': 'str', 'default': 'macro'}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.ClassifierThresholdOptimizationClassifier Threshold OptimizationAnalyzes and visualizes different threshold optimization methods for binary classification models....FalseTrue['dataset', 'model']{'methods': {'type': None, 'default': None}, 'target_recall': {'type': None, 'default': None}}['model_validation', 'threshold_optimization', 'classification_metrics']['classification']
validmind.model_validation.sklearn.ConfusionMatrixConfusion MatrixEvaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix...TrueFalse['dataset', 'model']{'threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.HyperParametersTuningHyper Parameters TuningPerforms exhaustive grid search over specified parameter ranges to find optimal model configurations...FalseTrue['model', 'dataset']{'param_grid': {'type': 'dict', 'default': None}, 'scoring': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}, 'fit_params': {'type': 'dict', 'default': None}}['sklearn', 'model_performance']['clustering', 'classification']
validmind.model_validation.sklearn.MinimumAccuracyMinimum AccuracyChecks if the model's prediction accuracy meets or surpasses a specified threshold....FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.7}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumF1ScoreMinimum F1 ScoreAssesses if the model's F1 score on the validation set meets a predefined minimum threshold, ensuring balanced...FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.MinimumROCAUCScoreMinimum ROCAUC ScoreValidates model by checking if the ROC AUC score meets or surpasses a specified threshold....FalseTrue['dataset', 'model']{'min_threshold': {'type': 'float', 'default': 0.5}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.ModelParametersModel ParametersExtracts and displays model parameters in a structured format for transparency and reproducibility....FalseTrue['model']{'model_params': {'type': None, 'default': None}}['model_training', 'metadata']['classification', 'regression']
validmind.model_validation.sklearn.ModelsPerformanceComparisonModels Performance ComparisonEvaluates and compares the performance of multiple Machine Learning models using various metrics like accuracy,...FalseTrue['dataset', 'models']{}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'model_comparison']['classification', 'text_classification']
validmind.model_validation.sklearn.OverfitDiagnosisOverfit DiagnosisAssesses potential overfitting in a model's predictions, identifying regions where performance between training and...TrueTrue['model', 'datasets']{'metric': {'type': 'str', 'default': None}, 'cut_off_threshold': {'type': 'float', 'default': 0.04}}['sklearn', 'binary_classification', 'multiclass_classification', 'linear_regression', 'model_diagnosis']['classification', 'regression']
validmind.model_validation.sklearn.PermutationFeatureImportancePermutation Feature ImportanceAssesses the significance of each feature in a model by evaluating the impact on model performance when feature...TrueFalse['model', 'dataset']{'fontsize': {'type': None, 'default': None}, 'figure_height': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.PopulationStabilityIndexPopulation Stability IndexAssesses the Population Stability Index (PSI) to quantify the stability of an ML model's predictions across...TrueTrue['datasets', 'model']{'num_bins': {'type': 'int', 'default': 10}, 'mode': {'type': 'str', 'default': 'fixed'}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance']['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurvePrecision Recall CurveEvaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve....TrueFalse['model', 'dataset']{}['sklearn', 'binary_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurveROC CurveEvaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic...TrueFalse['model', 'dataset']{}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.RegressionErrorsRegression ErrorsAssesses the performance and error distribution of a regression model using various error metrics....FalseTrue['model', 'dataset']{}['sklearn', 'model_performance']['regression', 'classification']
validmind.model_validation.sklearn.RobustnessDiagnosisRobustness DiagnosisAssesses the robustness of a machine learning model by evaluating performance decay under noisy conditions....TrueTrue['datasets', 'model']{'metric': {'type': 'str', 'default': None}, 'scaling_factor_std_dev_list': {'type': None, 'default': [0.1, 0.2, 0.3, 0.4, 0.5]}, 'performance_decay_threshold': {'type': 'float', 'default': 0.05}}['sklearn', 'model_diagnosis', 'visualization']['classification', 'regression']
validmind.model_validation.sklearn.SHAPGlobalImportanceSHAP Global ImportanceEvaluates and visualizes global feature importance using SHAP values for model explanation and risk identification....FalseTrue['model', 'dataset']{'kernel_explainer_samples': {'type': 'int', 'default': 10}, 'tree_or_linear_explainer_samples': {'type': 'int', 'default': 200}, 'class_of_interest': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'feature_importance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.ScoreProbabilityAlignmentScore Probability AlignmentAnalyzes the alignment between credit scores and predicted probabilities....TrueTrue['model', 'dataset']{'score_column': {'type': 'str', 'default': 'score'}, 'n_bins': {'type': 'int', 'default': 10}}['visualization', 'credit_risk', 'calibration']['classification']
validmind.model_validation.sklearn.TrainingTestDegradationTraining Test DegradationTests if model performance degradation between training and test datasets exceeds a predefined threshold....FalseTrue['datasets', 'model']{'max_threshold': {'type': 'float', 'default': 0.1}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization']['classification', 'text_classification']
validmind.model_validation.sklearn.WeakspotsDiagnosisWeakspots DiagnosisIdentifies and visualizes weak spots in a machine learning model's performance across various sections of the...TrueTrue['datasets', 'model']{'features_columns': {'type': None, 'default': None}, 'metrics': {'type': None, 'default': None}, 'thresholds': {'type': None, 'default': None}}['sklearn', 'binary_classification', 'multiclass_classification', 'model_diagnosis', 'visualization']['classification', 'text_classification']
validmind.model_validation.statsmodels.CumulativePredictionProbabilitiesCumulative Prediction ProbabilitiesVisualizes cumulative probabilities of positive and negative classes for both training and testing in classification models....TrueFalse['dataset', 'model']{'title': {'type': 'str', 'default': 'Cumulative Probabilities'}}['visualization', 'credit_risk']['classification']
validmind.model_validation.statsmodels.GINITableGINI TableEvaluates classification model performance using AUC, GINI, and KS metrics for training and test datasets....FalseTrue['dataset', 'model']{}['model_performance']['classification']
validmind.model_validation.statsmodels.KolmogorovSmirnovKolmogorov SmirnovAssesses whether each feature in the dataset aligns with a normal distribution using the Kolmogorov-Smirnov test....FalseTrue['model', 'dataset']{'dist': {'type': 'str', 'default': 'norm'}}['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels']['classification', 'regression']
validmind.model_validation.statsmodels.Lilliefors | Lilliefors | Assesses the normality of feature distributions in an ML model's training dataset using the Lilliefors test.... | False | True | ['dataset'] | {} | ['tabular_data', 'data_distribution', 'statistical_test', 'statsmodels'] | ['classification', 'regression']
validmind.model_validation.statsmodels.PredictionProbabilitiesHistogram | Prediction Probabilities Histogram | Assesses the predictive probability distribution for binary classification to evaluate model performance and... | True | False | ['dataset', 'model'] | {'title': {'type': 'str', 'default': 'Histogram of Predictive Probabilities'}} | ['visualization', 'credit_risk'] | ['classification']
validmind.model_validation.statsmodels.ScorecardHistogram | Scorecard Histogram | The Scorecard Histogram test evaluates the distribution of credit scores between default and non-default instances,... | True | False | ['dataset'] | {'title': {'type': 'str', 'default': 'Histogram of Scores'}, 'score_column': {'type': 'str', 'default': 'score'}} | ['visualization', 'credit_risk', 'logistic_regression'] | ['classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ClassDiscriminationDrift | Class Discrimination Drift | Compares classification discrimination metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ClassImbalanceDrift | Class Imbalance Drift | Evaluates drift in class distribution between reference and monitoring datasets.... | True | True | ['datasets'] | {'drift_pct_threshold': {'type': 'float', 'default': 5.0}, 'title': {'type': 'str', 'default': 'Class Distribution Drift'}} | ['tabular_data', 'binary_classification', 'multiclass_classification'] | ['classification']
validmind.ongoing_monitoring.ClassificationAccuracyDrift | Classification Accuracy Drift | Compares classification accuracy metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ConfusionMatrixDrift | Confusion Matrix Drift | Compares confusion matrix metrics between reference and monitoring datasets.... | False | True | ['datasets', 'model'] | {'drift_pct_threshold': {'type': '_empty', 'default': 20}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CumulativePredictionProbabilitiesDrift | Cumulative Prediction Probabilities Drift | Compares cumulative prediction probability distributions between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['visualization', 'credit_risk'] | ['classification']
validmind.ongoing_monitoring.PredictionProbabilitiesHistogramDrift | Prediction Probabilities Histogram Drift | Compares prediction probability distributions between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'title': {'type': '_empty', 'default': 'Prediction Probabilities Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk'] | ['classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ScoreBandsDrift | Score Bands Drift | Analyzes drift in population distribution and default rates across score bands.... | False | True | ['datasets', 'model'] | {'score_column': {'type': 'str', 'default': 'score'}, 'score_bands': {'type': 'list', 'default': None}, 'drift_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk', 'scorecard'] | ['classification']
validmind.ongoing_monitoring.ScorecardHistogramDrift | Scorecard Histogram Drift | Compares score distributions between reference and monitoring datasets for each class.... | True | True | ['datasets'] | {'score_column': {'type': 'str', 'default': 'score'}, 'title': {'type': 'str', 'default': 'Scorecard Histogram Drift'}, 'drift_pct_threshold': {'type': 'float', 'default': 20.0}} | ['visualization', 'credit_risk', 'logistic_regression'] | ['classification']
validmind.unit_metrics.classification.Accuracy | Accuracy | Calculates the accuracy of a model | False | False | ['dataset', 'model'] | {} | ['classification'] | ['classification']
validmind.unit_metrics.classification.F1 | F1 | Calculates the F1 score for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification']
validmind.unit_metrics.classification.Precision | Precision | Calculates the precision for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification']
validmind.unit_metrics.classification.ROC_AUC | ROC AUC | Calculates the ROC AUC for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification']
validmind.unit_metrics.classification.Recall | Recall | Calculates the recall for a classification model. | False | False | ['model', 'dataset'] | {} | ['classification'] | ['classification']
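The Required Inputs and Params columns above show what each test expects at run time. As a minimal sketch (not part of this change set), a test from the listing can be run directly by its ID; `vm_test_ds` and `vm_model` are placeholder names for a dataset and model assumed to have been registered earlier with `vm.init_dataset()` and `vm.init_model()`:

```python
# Minimal sketch; `vm_test_ds` and `vm_model` are placeholders for objects
# created earlier with vm.init_dataset() / vm.init_model().
from validmind.tests import run_test

result = run_test(
    "validmind.unit_metrics.classification.F1",         # ID taken from the table above
    inputs={"model": vm_model, "dataset": vm_test_ds},  # matches its Required Inputs
)
result.log()  # optionally log the result to the ValidMind Platform
```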
\n" ], - "source": [ - "list_tests(tags=[\"model_performance\", \"visualization\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use `filter`, `task`, and `tags` together to create more specific queries.\n", - "\n", - "For example, apply all three to find tests compatible with `sklearn` models, designed for `classification` tasks:" + "text/plain": [ + "" ] - }, + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tests(task=\"classification\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use the `tags` parameter to find tests based on their tags, such as `model_performance` or `visualization`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks
validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
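Before running an unfamiliar test from a listing like this, it may help to inspect it first. A hedged sketch, assuming the `describe_test` helper exported by `validmind.tests` (it is not part of this diff):

```python
# Sketch: render a test's full description, required inputs, and parameters
# in the notebook before deciding whether to run it.
from validmind.tests import describe_test

describe_test("validmind.model_validation.sklearn.ROCCurve")
```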
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks
validmind.model_validation.RegressionResidualsPlot | Regression Residuals Plot | Evaluates regression model performance using residual distribution and actual vs. predicted plots.... | True | False | ['model', 'dataset'] | {'bin_size': {'type': 'float', 'default': 0.1}} | ['model_performance', 'visualization'] | ['regression']
validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
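Since `list_tests()` accepts `pretty=False` to return plain test ID strings, a filtered set like the one above can plausibly be stored and run in a loop. A rough sketch, where `vm_model` and `vm_test_ds` are again placeholders for previously initialized ValidMind objects:

```python
# Rough sketch: store the filtered IDs, then run each test with shared inputs.
# Note: some tests in this set require different inputs (e.g. 'datasets'
# instead of 'dataset'), so a real loop may need per-test input handling.
from validmind.tests import list_tests, run_test

performance_tests = list_tests(
    tags=["model_performance", "visualization"], pretty=False
)
for test_id in performance_tests:
    run_test(test_id, inputs={"model": vm_model, "dataset": vm_test_ds}).log()
```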
\n" ], - "source": [ - "list_tests(filter=\"sklearn\",\n", - " tags=[\"model_performance\", \"visualization\"], task=\"classification\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Store test sets for use\n", - "\n", - "Once you've identified specific sets of tests you'd like to run, you can store the tests in variables, enabling you to easily reuse those tests in later steps.\n", - "\n", - "For example, if you're validating a summarization model, use [`list_tests()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to retrieve all tests tagged for text summarization and save them to `text_summarization_tests` for later use:\n" + "text/plain": [ + "" ] - }, + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tests(tags=[\"model_performance\", \"visualization\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use `filter`, `task`, and `tags` together to create more specific queries.\n", + "\n", + "For example, apply all three to find tests compatible with `sklearn` models, designed for `classification` tasks:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['validmind.data_validation.DatasetDescription',\n", - " 'validmind.data_validation.DatasetSplit',\n", - " 'validmind.data_validation.nlp.CommonWords',\n", - " 'validmind.data_validation.nlp.Hashtags',\n", - " 'validmind.data_validation.nlp.LanguageDetection',\n", - " 'validmind.data_validation.nlp.Mentions',\n", - " 'validmind.data_validation.nlp.Punctuations',\n", - " 'validmind.data_validation.nlp.StopWords',\n", - " 'validmind.data_validation.nlp.TextDescription',\n", - " 'validmind.model_validation.BertScore',\n", - " 'validmind.model_validation.BleuScore',\n", - " 'validmind.model_validation.ContextualRecall',\n", - " 'validmind.model_validation.MeteorScore',\n", - " 'validmind.model_validation.RegardScore',\n", - " 'validmind.model_validation.RougeScore',\n", - " 'validmind.model_validation.TokenDisparity',\n", - " 'validmind.model_validation.ToxicityScore',\n", - " 'validmind.model_validation.embeddings.CosineSimilarityComparison',\n", - " 'validmind.model_validation.embeddings.CosineSimilarityHeatmap',\n", - " 'validmind.model_validation.embeddings.EuclideanDistanceComparison',\n", - " 'validmind.model_validation.embeddings.EuclideanDistanceHeatmap',\n", - " 'validmind.model_validation.embeddings.PCAComponentsPairwisePlots',\n", - " 'validmind.model_validation.embeddings.TSNEComponentsPairwisePlots',\n", - " 'validmind.model_validation.ragas.AnswerCorrectness',\n", - " 'validmind.model_validation.ragas.AspectCritic',\n", - " 'validmind.model_validation.ragas.ContextEntityRecall',\n", - " 'validmind.model_validation.ragas.ContextPrecision',\n", - " 'validmind.model_validation.ragas.ContextPrecisionWithoutReference',\n", - " 'validmind.model_validation.ragas.ContextRecall',\n", - " 'validmind.model_validation.ragas.Faithfulness',\n", - " 'validmind.model_validation.ragas.NoiseSensitivity',\n", - " 'validmind.model_validation.ragas.ResponseRelevancy',\n", - " 'validmind.model_validation.ragas.SemanticSimilarity',\n", - " 'validmind.prompt_validation.Bias',\n", - " 'validmind.prompt_validation.Clarity',\n", - " 'validmind.prompt_validation.Conciseness',\n", - " 
'validmind.prompt_validation.Delimitation',\n", - " 'validmind.prompt_validation.NegativeInstruction',\n", - " 'validmind.prompt_validation.Robustness',\n", - " 'validmind.prompt_validation.Specificity']" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
ID | Name | Description | Has Figure | Has Table | Required Inputs | Params | Tags | Tasks
validmind.model_validation.sklearn.ConfusionMatrix | Confusion Matrix | Evaluates and visually represents the classification ML model's predictive performance using a Confusion Matrix... | True | False | ['dataset', 'model'] | {'threshold': {'type': 'float', 'default': 0.5}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.PrecisionRecallCurve | Precision Recall Curve | Evaluates the precision-recall trade-off for binary classification models and visualizes the Precision-Recall curve.... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.ROCCurve | ROC Curve | Evaluates binary classification model performance by generating and plotting the Receiver Operating Characteristic... | True | False | ['model', 'dataset'] | {} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.model_validation.sklearn.TrainingTestDegradation | Training Test Degradation | Tests if model performance degradation between training and test datasets exceeds a predefined threshold.... | False | True | ['datasets', 'model'] | {'max_threshold': {'type': 'float', 'default': 0.1}} | ['sklearn', 'binary_classification', 'multiclass_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.CalibrationCurveDrift | Calibration Curve Drift | Evaluates changes in probability calibration between reference and monitoring datasets.... | True | True | ['datasets', 'model'] | {'n_bins': {'type': 'int', 'default': 10}, 'drift_pct_threshold': {'type': 'float', 'default': 20}} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
validmind.ongoing_monitoring.ROCCurveDrift | ROC Curve Drift | Compares ROC curves between reference and monitoring datasets.... | True | False | ['datasets', 'model'] | {} | ['sklearn', 'binary_classification', 'model_performance', 'visualization'] | ['classification', 'text_classification']
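The same combined query can be captured as data rather than a rendered table by adding `pretty=False`, mirroring the storing pattern shown later in this notebook. A small sketch under that assumption:

```python
# Sketch: capture the combined filter/tags/task query as a list of test IDs.
from validmind.tests import list_tests

sklearn_classification_tests = list_tests(
    filter="sklearn",
    tags=["model_performance", "visualization"],
    task="classification",
    pretty=False,
)
print(len(sklearn_classification_tests))  # one entry per row in the table above
```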
\n" ], - "source": [ - "text_summarization_tests = list_tests(task=\"text_summarization\", pretty=False)\n", - "text_summarization_tests" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "## Next steps\n", - "\n", - "Now that you know how to browse and filter tests in the ValidMind Library, you’re ready to take the next step. Use the test IDs you’ve selected to either run individual tests or batch run them with custom test suites.\n", - "\n", - "
Learn about the test suites available in the ValidMind Library.\n",
    "

\n", - "Check out our Explore test suites notebook for more code examples and usage of key functions.
\n", - "\n", - "\n", - "\n", - "### Discover more learning resources\n", - "\n", - "We offer many interactive notebooks to help you document models:\n", - "\n", - "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", - "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", - "\n", - "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind.\n" + "text/plain": [ + "" ] - }, + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list_tests(filter=\"sklearn\",\n", + " tags=[\"model_performance\", \"visualization\"], task=\"classification\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Store test sets for use\n", + "\n", + "Once you've identified specific sets of tests you'd like to run, you can store the tests in variables, enabling you to easily reuse those tests in later steps.\n", + "\n", + "For example, if you're validating a summarization model, use [`list_tests()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) to retrieve all tests tagged for text summarization and save them to `text_summarization_tests` for later use:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\n", - "***\n", - "\n", - "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", - "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + "data": { + "text/plain": [ + "['validmind.data_validation.DatasetDescription',\n", + " 'validmind.data_validation.DatasetSplit',\n", + " 'validmind.data_validation.nlp.CommonWords',\n", + " 'validmind.data_validation.nlp.Hashtags',\n", + " 'validmind.data_validation.nlp.LanguageDetection',\n", + " 'validmind.data_validation.nlp.Mentions',\n", + " 'validmind.data_validation.nlp.Punctuations',\n", + " 'validmind.data_validation.nlp.StopWords',\n", + " 'validmind.data_validation.nlp.TextDescription',\n", + " 'validmind.model_validation.BertScore',\n", + " 'validmind.model_validation.BleuScore',\n", + " 'validmind.model_validation.ContextualRecall',\n", + " 'validmind.model_validation.MeteorScore',\n", + " 'validmind.model_validation.RegardScore',\n", + " 'validmind.model_validation.RougeScore',\n", + " 'validmind.model_validation.TokenDisparity',\n", + " 'validmind.model_validation.ToxicityScore',\n", + " 'validmind.model_validation.embeddings.CosineSimilarityComparison',\n", + " 'validmind.model_validation.embeddings.CosineSimilarityHeatmap',\n", + " 'validmind.model_validation.embeddings.EuclideanDistanceComparison',\n", + " 'validmind.model_validation.embeddings.EuclideanDistanceHeatmap',\n", + " 'validmind.model_validation.embeddings.PCAComponentsPairwisePlots',\n", + " 'validmind.model_validation.embeddings.TSNEComponentsPairwisePlots',\n", + " 'validmind.model_validation.ragas.AnswerCorrectness',\n", + " 'validmind.model_validation.ragas.AspectCritic',\n", + " 'validmind.model_validation.ragas.ContextEntityRecall',\n", + " 'validmind.model_validation.ragas.ContextPrecision',\n", + " 'validmind.model_validation.ragas.ContextPrecisionWithoutReference',\n", + " 'validmind.model_validation.ragas.ContextRecall',\n", + " 'validmind.model_validation.ragas.Faithfulness',\n", + " 'validmind.model_validation.ragas.NoiseSensitivity',\n", + " 'validmind.model_validation.ragas.ResponseRelevancy',\n", + " 'validmind.model_validation.ragas.SemanticSimilarity',\n", + " 'validmind.prompt_validation.Bias',\n", + " 'validmind.prompt_validation.Clarity',\n", + " 'validmind.prompt_validation.Conciseness',\n", + " 'validmind.prompt_validation.Delimitation',\n", + " 'validmind.prompt_validation.NegativeInstruction',\n", + " 'validmind.prompt_validation.Robustness',\n", + " 'validmind.prompt_validation.Specificity']" ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.20" - } + ], + "source": [ + "text_summarization_tests = list_tests(task=\"text_summarization\", pretty=False)\n", + "text_summarization_tests" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps\n", + "\n", + "Now that you know how to browse and filter tests in the ValidMind Library, you’re ready to take the next step. Use the test IDs you’ve selected to either run individual tests or batch run them with custom test suites.\n", + "\n", + "
Learn about the test suites available in the ValidMind Library.\n",
    "

\n", + "Check out our Explore test suites notebook for more code examples and usage of key functions.
\n", + "\n", + "\n", + "\n", + "### Discover more learning resources\n", + "\n", + "We offer many interactive notebooks to help you document models:\n", + "\n", + "- [Run tests & test suites](https://docs.validmind.ai/developer/model-testing/testing-overview.html)\n", + "- [Code samples](https://docs.validmind.ai/developer/samples-jupyter-notebooks.html)\n", + "\n", + "Or, visit our [documentation](https://docs.validmind.ai/) to learn more about ValidMind.\n" + ] + }, + { + "cell_type": "markdown", + "id": "copyright-1b1ae13be5d84b3b84274b9e7789f11b", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "\n", + "***\n", + "\n", + "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 4 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.20" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } diff --git a/site/notebooks/how_to/filter_input_columns.ipynb b/site/notebooks/how_to/filter_input_columns.ipynb index 5c8b2ce093..1f1bb65c59 100644 --- a/site/notebooks/how_to/filter_input_columns.ipynb +++ b/site/notebooks/how_to/filter_input_columns.ipynb @@ -64,14 +64,17 @@ }, { "cell_type": "markdown", + "id": "copyright-814bdeab9bdf4b1c87f3fc2ea3b33fbc", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/load_datasets_predictions.ipynb b/site/notebooks/how_to/load_datasets_predictions.ipynb index c10eb88dc4..a2dacdeea0 100644 --- a/site/notebooks/how_to/load_datasets_predictions.ipynb +++ b/site/notebooks/how_to/load_datasets_predictions.ipynb @@ -1017,14 +1017,17 @@ }, { "cell_type": "markdown", + "id": "copyright-0763ff57c8834b5e80d683d17186580e", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/log_metrics_over_time.ipynb b/site/notebooks/how_to/log_metrics_over_time.ipynb index 53b3ee6ee0..f3bbfbdfa7 100644 --- a/site/notebooks/how_to/log_metrics_over_time.ipynb +++ b/site/notebooks/how_to/log_metrics_over_time.ipynb @@ -919,14 +919,17 @@ }, { "cell_type": "markdown", + "id": "copyright-2b6b06cfa3254951b8ebbc1178037888", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_documentation_sections.ipynb b/site/notebooks/how_to/run_documentation_sections.ipynb index 0cfda9183f..fb4412df95 100644 --- a/site/notebooks/how_to/run_documentation_sections.ipynb +++ b/site/notebooks/how_to/run_documentation_sections.ipynb @@ -555,14 +555,17 @@ }, { "cell_type": "markdown", + "id": "copyright-f4756a1f66ab49598b696ed86685fcc6", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_documentation_tests_with_config.ipynb b/site/notebooks/how_to/run_documentation_tests_with_config.ipynb index d93dfa534d..a96df1a5b1 100644 --- a/site/notebooks/how_to/run_documentation_tests_with_config.ipynb +++ b/site/notebooks/how_to/run_documentation_tests_with_config.ipynb @@ -680,14 +680,17 @@ }, { "cell_type": "markdown", + "id": "copyright-d1660c97181e4b7e9470b3529ffef83d", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb b/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb index cbab7460b2..a901849c4b 100644 --- a/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb +++ b/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb @@ -567,14 +567,17 @@ }, { "cell_type": "markdown", + "id": "copyright-25b1e88bb9d849c8a1f3fc48aac4b6a4", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb b/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb index 7b8d8878ef..5aa8d78dba 100644 --- a/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb +++ b/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb @@ -751,14 +751,17 @@ }, { "cell_type": "markdown", + "id": "copyright-d655a3bbc6e14b15b7466f3808556ef8", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb b/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb index 0eeb52e62c..d292c4461d 100644 --- a/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb +++ b/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb @@ -532,14 +532,17 @@ }, { "cell_type": "markdown", + "id": "copyright-b40eb05198f94039ae2f66d120320271", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/run_unit_metrics.ipynb b/site/notebooks/how_to/run_unit_metrics.ipynb index 29f367d37b..235c538a98 100644 --- a/site/notebooks/how_to/run_unit_metrics.ipynb +++ b/site/notebooks/how_to/run_unit_metrics.ipynb @@ -756,14 +756,17 @@ }, { "cell_type": "markdown", + "id": "copyright-238ef79bf0f045d9a112ffd540267e96", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } @@ -785,8 +788,7 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.13" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/site/notebooks/how_to/understand_utilize_rawdata.ipynb b/site/notebooks/how_to/understand_utilize_rawdata.ipynb index a48e602a63..5fe3c360de 100644 --- a/site/notebooks/how_to/understand_utilize_rawdata.ipynb +++ b/site/notebooks/how_to/understand_utilize_rawdata.ipynb @@ -702,15 +702,17 @@ }, { "cell_type": "markdown", - "id": "16d292fe", + "id": "copyright-d9a502e868ba4fc1a70056873609b472", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/how_to/use_dataset_model_objects.ipynb b/site/notebooks/how_to/use_dataset_model_objects.ipynb index b491ebb412..12431963c5 100644 --- a/site/notebooks/how_to/use_dataset_model_objects.ipynb +++ b/site/notebooks/how_to/use_dataset_model_objects.ipynb @@ -966,14 +966,17 @@ }, { "cell_type": "markdown", + "id": "copyright-340a990e20194848af0efb0c965e219a", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/quickstart/quickstart_model_documentation.ipynb b/site/notebooks/quickstart/quickstart_model_documentation.ipynb index 75dd433292..e88d9a8036 100644 --- a/site/notebooks/quickstart/quickstart_model_documentation.ipynb +++ b/site/notebooks/quickstart/quickstart_model_documentation.ipynb @@ -852,15 +852,17 @@ }, { "cell_type": "markdown", - "id": "09c906f3", + "id": "copyright-e26871efeffc48e386d68e90db1838e7", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/quickstart/quickstart_model_validation.ipynb b/site/notebooks/quickstart/quickstart_model_validation.ipynb index f9470bd705..ffa1bc42ae 100644 --- a/site/notebooks/quickstart/quickstart_model_validation.ipynb +++ b/site/notebooks/quickstart/quickstart_model_validation.ipynb @@ -1173,15 +1173,17 @@ }, { "cell_type": "markdown", - "id": "f33d4214", + "id": "copyright-09361e6dcd874470819686c63660be51", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_development/1-set_up_validmind.ipynb b/site/notebooks/tutorials/model_development/1-set_up_validmind.ipynb index a42e05c064..f85b592d8a 100644 --- a/site/notebooks/tutorials/model_development/1-set_up_validmind.ipynb +++ b/site/notebooks/tutorials/model_development/1-set_up_validmind.ipynb @@ -440,15 +440,17 @@ }, { "cell_type": "markdown", - "id": "39c974f0", + "id": "copyright-3e02e70c1d4d4840bf8d9ef44e2cf20c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_development/2-start_development_process.ipynb b/site/notebooks/tutorials/model_development/2-start_development_process.ipynb index c4b19ea852..1d62e9c085 100644 --- a/site/notebooks/tutorials/model_development/2-start_development_process.ipynb +++ b/site/notebooks/tutorials/model_development/2-start_development_process.ipynb @@ -989,14 +989,17 @@ }, { "cell_type": "markdown", + "id": "copyright-a258273ac8eb41a29a39e52e6f3b4341", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb b/site/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb index 7966f7886c..49df9a5b50 100644 --- a/site/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb +++ b/site/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb @@ -968,14 +968,17 @@ }, { "cell_type": "markdown", + "id": "copyright-2f589b81f04949f6a2c6764859b8bc86", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb b/site/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb index 8e57366658..0af95d90a7 100644 --- a/site/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb +++ b/site/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb @@ -967,14 +967,17 @@ }, { "cell_type": "markdown", + "id": "copyright-6ef1ef2b30eb44c29b99e625c58826ee", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb b/site/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb index 807d6caf9e..b9154b80c2 100644 --- a/site/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb +++ b/site/notebooks/tutorials/model_validation/1-set_up_validmind_for_validation.ipynb @@ -491,15 +491,17 @@ }, { "cell_type": "markdown", - "id": "47e90901", + "id": "copyright-5d7a1c159e4840fca79011d1c0380725", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_validation/2-start_validation_process.ipynb b/site/notebooks/tutorials/model_validation/2-start_validation_process.ipynb index e6fb19454c..48e4dbff66 100644 --- a/site/notebooks/tutorials/model_validation/2-start_validation_process.ipynb +++ b/site/notebooks/tutorials/model_validation/2-start_validation_process.ipynb @@ -862,14 +862,17 @@ }, { "cell_type": "markdown", + "id": "copyright-149e9401b9df40d393a2a1958331dea6", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_validation/3-developing_challenger_model.ipynb b/site/notebooks/tutorials/model_validation/3-developing_challenger_model.ipynb index 68398c09de..5f7d7e8fd2 100644 --- a/site/notebooks/tutorials/model_validation/3-developing_challenger_model.ipynb +++ b/site/notebooks/tutorials/model_validation/3-developing_challenger_model.ipynb @@ -865,14 +865,17 @@ }, { "cell_type": "markdown", + "id": "copyright-58757a9cc9de45069a5e6c57c8aaff14", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] } diff --git a/site/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb b/site/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb index c5c5f94ea0..fe4e221c90 100644 --- a/site/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb +++ b/site/notebooks/tutorials/model_validation/4-finalize_validation_reporting.ipynb @@ -1200,14 +1200,17 @@ }, { "cell_type": "markdown", + "id": "copyright-53539c44edf54e4e9bc4d959ffbddd2c", "metadata": {}, "source": [ + "\n", + "\n", "\n", "\n", "***\n", "\n", "Copyright © 2023-2026 ValidMind Inc. All rights reserved.
\n", - "Refer to the [LICENSE file in the root of the GitHub `validmind-library` repository](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", + "Refer to [LICENSE](https://github.com/validmind/validmind-library/blob/main/LICENSE) for details.
\n", "SPDX-License-Identifier: AGPL-3.0 AND ValidMind Commercial
" ] }