diff --git a/site/_quarto.yml b/site/_quarto.yml index ab80ddfb9a..cd66981bfb 100644 --- a/site/_quarto.yml +++ b/site/_quarto.yml @@ -39,12 +39,12 @@ website: file: guide/guides.qmd - text: "{{< fa cube >}} Developers" menu: - - text: "{{< fa rocket >}} Get Started" - file: developer/get-started-validmind-library.qmd + - text: "{{< fa laptop-code >}} {{< var validmind.developer >}}" + file: developer/validmind-library.qmd - text: "{{< fa cubes >}} Supported Models" - file: developer/model-documentation/supported-models.qmd - - text: "{{< fa book-medical >}} Documenting Models" - file: developer/model-documentation/documenting-models.qmd + file: developer/supported-models.qmd + - text: "{{< fa rocket >}} QuickStart Notebook" + file: notebooks/quickstart_customer_churn_full_suite.ipynb - text: "---" - text: "{{< fa vial >}} TESTING" - text: "{{< fa flask-vial >}} Run Tests & Test Suites" @@ -63,11 +63,9 @@ website: file: https://jupyterhub.validmind.ai/ - text: "---" - text: "{{< fa book >}} REFERENCE" - - text: "{{< fa external-link >}} {{< var validmind.developer >}}" + - text: "{{< fa external-link >}} {{< var validmind.developer >}} API" file: validmind/validmind.html target: _blank - - text: "FAQ" - file: faq/faq.qmd - text: "Support" file: support/support.qmd - text: "Training" @@ -88,14 +86,12 @@ website: file: get-started/get-started.qmd - text: "{{< fa book >}} Guides" file: guide/guides.qmd - - text: "{{< fa circle-question >}} FAQ" - file: faq/faq.qmd - text: "{{< fa envelope-open-text >}} Support" file: support/support.qmd - text: "---" - text: "{{< fa cube >}} Developers" - text: "{{< fa code >}} {{< var validmind.developer >}}" - file: developer/get-started-validmind-library.qmd + file: developer/validmind-library.qmd - text: "---" - text: "{{< fa graduation-cap >}} {{< var validmind.training >}}" - text: "{{< fa building-columns >}} Training Courses" @@ -283,13 +279,13 @@ website: - file: 
guide/model-documentation/working-with-documentation-templates.qmd contents: - guide/model-documentation/view-documentation-templates.qmd -# - guide/import-documentation-templates.qmd - guide/model-documentation/customize-documentation-templates.qmd - guide/model-documentation/swap-documentation-templates.qmd - file: guide/model-documentation/working-with-model-documentation.qmd contents: - guide/model-documentation/view-documentation-guidelines.qmd - guide/model-documentation/work-with-content-blocks.qmd + - guide/model-documentation/work-with-test-results.qmd - text: "Assign section statuses" file: guide/model-documentation/assign-documentation-section-statuses.qmd - guide/model-documentation/collaborate-with-others.qmd @@ -297,7 +293,6 @@ website: - guide/model-documentation/export-documentation.qmd - text: "---" - text: "Model Validation" -# - guide/customize-validation-report-templates.qmd - guide/model-validation/manage-validation-guidelines.qmd - file: guide/model-validation/preparing-validation-reports.qmd contents: @@ -333,21 +328,25 @@ website: contents: # USING THE VARIABLE IN THE LINK TEXT MESSES UP THE MOBILE VIEW - text: "ValidMind Library" - file: developer/get-started-validmind-library.qmd + file: developer/validmind-library.qmd + - developer/supported-models.qmd - text: "---" - - text: "Model Documentation" + - text: "QuickStart" - notebooks/quickstart_customer_churn_full_suite.ipynb - - text: "Introduction for model developers" - file: notebooks/tutorials/intro_for_model_developers.ipynb - - developer/model-documentation/supported-models.qmd - - file: developer/model-documentation/documenting-models.qmd - contents: - - developer/model-documentation/document-models.qmd - # USING THE VARIABLE IN THE LINK TEXT MESSES UP THE MOBILE VIEW & BREADCRUMB - - text: "Install and initialize ValidMind Library" - file: developer/model-documentation/install-and-initialize-validmind-library.qmd - - developer/model-documentation/work-with-test-results.qmd - - 
developer/model-documentation/store-credentials-in-env-file.qmd + - text: "Install and initialize ValidMind Library" + file: developer/model-documentation/install-and-initialize-validmind-library.qmd + - developer/model-documentation/store-credentials-in-env-file.qmd + - text: "---" + - text: "Model Development" + # USING THE VARIABLE IN THE LINK TEXT MESSES UP THE MOBILE VIEW & BREADCRUMB + - text: "101 Set up ValidMind Library" + file: notebooks/tutorials/model_development/101-set_up_validmind.ipynb + - text: "102 Start model development process" + file: notebooks/tutorials/model_development/102-start_development_process.ipynb + - text: "103 Integrate custom tests" + file: notebooks/tutorials/model_development/103-integrate_custom_tests.ipynb + - text: "104 Finalize testing & documentation" + file: notebooks/tutorials/model_development/104-finalize_testing_documentation.ipynb - text: "---" - text: "Model Testing" - text: "Run tests & test suites" @@ -364,36 +363,33 @@ website: contents: "notebooks/code_samples/**" - text: "---" - text: "Reference" - - text: "{{< var validmind.developer >}} {{< fa external-link >}}" + - text: "{{< var validmind.developer >}} API" file: validmind/validmind.html target: _blank - - title: "FAQ" - contents: - - faq/faq.qmd - - text: "---" - - text: "Access & permissions" - file: faq/faq-organizations.qmd - - faq/faq-workflows.qmd - - text: "Inventory & activity" - file: faq/faq-inventory.qmd - - text: "Documentation & templates" - file: faq/faq-documentation.qmd - - text: "Validation & findings" - file: faq/faq-validation.qmd - - faq/faq-collaboration.qmd - - text: "Monitoring & reporting" - file: faq/faq-reporting.qmd - - faq/faq-testing.qmd - - faq/faq-integrations.qmd - - text: "Data & privacy" - file: faq/faq-privacy.qmd - - title: "Support" contents: - support/support.qmd - - text: "---" - support/troubleshooting.qmd + - text: "---" + - file: faq/faq.qmd + contents: + - text: "Access & permissions" + file: 
faq/faq-organizations.qmd + - faq/faq-workflows.qmd + - text: "Inventory & activity" + file: faq/faq-inventory.qmd + - text: "Documentation & templates" + file: faq/faq-documentation.qmd + - text: "Validation & findings" + file: faq/faq-validation.qmd + - faq/faq-collaboration.qmd + - text: "Monitoring & reporting" + file: faq/faq-reporting.qmd + - faq/faq-testing.qmd + - faq/faq-integrations.qmd + - text: "Data & privacy" + file: faq/faq-privacy.qmd # COMMENT THIS OUT WHEN DONE TESTING # - title: "Testing" diff --git a/site/about/contributing/join-community.qmd b/site/about/contributing/join-community.qmd index 791dbf32cc..0ba7ade88e 100644 --- a/site/about/contributing/join-community.qmd +++ b/site/about/contributing/join-community.qmd @@ -2,8 +2,8 @@ title: "" # date: last-modified aliases: - - ../guide/join-community.html - - ../join-community.html + - /guide/join-community.html + - /about/join-community.html --- ```{=html} diff --git a/site/about/contributing/style-guide/conventions.qmd b/site/about/contributing/style-guide/conventions.qmd index edb0b3c12f..ffa2cd529f 100644 --- a/site/about/contributing/style-guide/conventions.qmd +++ b/site/about/contributing/style-guide/conventions.qmd @@ -286,7 +286,7 @@ For example, for an original file named `site/guide/overview.qmd` moved and rena --- title: "New overview" aliases: - - ../guide/overview.html + - /guide/overview.html --- ``` ::: diff --git a/site/about/contributing/style-guide/style-guide.qmd b/site/about/contributing/style-guide/style-guide.qmd index 68137964bf..ddf837b83f 100644 --- a/site/about/contributing/style-guide/style-guide.qmd +++ b/site/about/contributing/style-guide/style-guide.qmd @@ -13,7 +13,7 @@ listing: - voice-and-tone.qmd - conventions.qmd aliases: - - ../../style-guide.html + - /about/style-guide.html --- A style guide helps create distinct yet unified communication across all areas of a product experience, from in-app interactions to technical documentation and blog posts. 
diff --git a/site/about/contributing/validmind-community.qmd b/site/about/contributing/validmind-community.qmd index a2a051facb..cc1166102e 100644 --- a/site/about/contributing/validmind-community.qmd +++ b/site/about/contributing/validmind-community.qmd @@ -2,7 +2,8 @@ title: "{{< var vm.product >}} community" date: last-modified aliases: - - ../validmind-commmunity.html + - /about/validmind-commmunity.html + - /about/validmind-community.html --- Work with financial models, in model risk management (MRM), or are simply enthusiastic about artificial intelligence (AI) and machine learning and how these tools are actively shaping our futures within the finance industry and beyond? Congratulations — you're already part of the {{< var vm.product >}} community! Come learn and play with us. @@ -24,7 +24,7 @@ Learn about our company vision, get to know our brand's voice and preferred form Here at {{< var vm.product >}}, we embrace an [open source](https://en.wikipedia.org/wiki/Open_source) ideology. This means that we think expertise is inclusive, and is also open to evolving — there's always an opportunity to learn from each other, and help each other improve. -As a member of the {{< var vm.product >}} community, we invite you to be part of our process. From our public documentation to the code behind our [{{< var vm.developer >}}](/developer/get-started-validmind-library.qmd), we've exposed the wiring for inspection in hopes that great minds think differently and dare to spark change. +As a member of the {{< var vm.product >}} community, we invite you to be part of our process. From our public documentation to the code behind our [{{< var vm.developer >}}](/developer/validmind-library.qmd), we've exposed the wiring for inspection in hopes that great minds think differently and dare to spark change. ### Contribution ideas Please note that all community contributions are subject to review by the {{< var vm.product >}} team.
See our [software license agreement](/about/fine-print/license-agreement.qmd) for more details. diff --git a/site/about/fine-print/data-privacy-policy.qmd b/site/about/fine-print/data-privacy-policy.qmd index 3de8cd4e5a..0c357ebcec 100644 --- a/site/about/fine-print/data-privacy-policy.qmd +++ b/site/about/fine-print/data-privacy-policy.qmd @@ -3,8 +3,8 @@ title: "Data privacy policy" keywords: "data privacy, ai risk, model risk management, {{< var vm.product >}}" date: last-modified aliases: - - ../data-privacy-policy.html - - ../guide/data-privacy-policy.html + - /about/data-privacy-policy.html + - /guide/data-privacy-policy.html --- This page outlines {{< var vm.product >}}'s data privacy policy, explaining how we protect your personal information. diff --git a/site/about/fine-print/license-agreement.qmd b/site/about/fine-print/license-agreement.qmd index 5ed54b45d3..9d3cbd8f5e 100644 --- a/site/about/fine-print/license-agreement.qmd +++ b/site/about/fine-print/license-agreement.qmd @@ -2,8 +2,8 @@ title: "SOFTWARE LICENSE AGREEMENT" date: last-modified aliases: - - ../license-agreement.html - - ../guide/license-agreement.html + - /about/license-agreement.html + - /guide/license-agreement.html --- IMPORTANT: READ THIS SOFTWARE LICENSE AGREEMENT (THIS “AGREEMENT”) CAREFULLY BEFORE USING THE SOFTWARE. BY USING THE SOFTWARE, YOU ARE AGREEING TO BE BOUND BY THE TERMS OF THIS LICENSE AGREEMENT: diff --git a/site/about/glossary/key_concepts/_inputs.qmd b/site/about/glossary/key_concepts/_inputs.qmd index ea57f4a5b5..6dcaf62e7b 100644 --- a/site/about/glossary/key_concepts/_inputs.qmd +++ b/site/about/glossary/key_concepts/_inputs.qmd @@ -4,4 +4,4 @@ inputs - **model**: A single model that has been initialized in {{< var vm.product >}} with `vm.init_model()`. See the [Model Documentation](/validmind/validmind.html#init_model){target="_blank"} or the for more information. 
- **dataset**: Single dataset that has been initialized in {{< var vm.product >}} with `vm.init_dataset()`. See the [Dataset Documentation](/validmind/validmind.html#init_dataset){target="_blank"} for more information. - **models**: A list of {{< var vm.product >}} models - usually this is used when you want to compare multiple models in your custom tests. - - **datasets**: A list of {{< var vm.product >}} datasets - usually this is used when you want to compare multiple datasets in your custom tests. See this [example](/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb) for more information. + - **datasets**: A list of {{< var vm.product >}} datasets - usually this is used when you want to compare multiple datasets in your custom tests. (Learn more: [Run tests with multiple datasets](/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb)) diff --git a/site/about/overview-model-documentation.qmd b/site/about/overview-model-documentation.qmd index 63456125c6..1b03cd3f23 100644 --- a/site/about/overview-model-documentation.qmd +++ b/site/about/overview-model-documentation.qmd @@ -2,7 +2,7 @@ title: "Automated model testing & documentation" date: last-modified aliases: - - ../guide/overview-model-documentation.html + - /guide/overview-model-documentation.html listing: id: quickstart type: grid @@ -138,7 +138,7 @@ How the {{< var validmind.developer >}} works: [^1]: [Model risk management](overview-model-risk-management.qmd) -[^2]: [Supported models](/developer/model-documentation/supported-models.qmd) +[^2]: [Supported models](/developer/supported-models.qmd) [^3]: [Customize documentation templates](/guide/model-documentation/customize-documentation-templates.qmd) diff --git a/site/about/overview-model-risk-management.qmd b/site/about/overview-model-risk-management.qmd index 93a77f0a7b..56c57a72ff 100644 --- a/site/about/overview-model-risk-management.qmd +++ b/site/about/overview-model-risk-management.qmd @@ -2,7 +2,7 @@ title: "Model risk
management" date: last-modified aliases: - - ../guide/overview-model-risk-management.html + - /guide/overview-model-risk-management.html listing: id: quickstart type: grid diff --git a/site/about/overview.qmd b/site/about/overview.qmd index c631d4229f..d7d7db0236 100644 --- a/site/about/overview.qmd +++ b/site/about/overview.qmd @@ -12,7 +12,7 @@ listing: - overview-model-documentation.qmd - overview-model-risk-management.qmd aliases: - - ../guide/overview.html + - /guide/overview.html --- :::: {.flex .flex-wrap .justify-around} diff --git a/site/developer/model-documentation/document-models.qmd b/site/developer/model-documentation/document-models.qmd deleted file mode 100644 index c1900abe5d..0000000000 --- a/site/developer/model-documentation/document-models.qmd +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Document models" -date: last-modified ---- - -Generate model documentation starting with your model or model predictions, load your model or predictions into the {{< var vm.developer >}}, then finally view the results and refine your documentation in the {{< var vm.platform >}} to make it ready for approval. - -::: {.attn} - -## Prerequisites - -- [x] {{< var link.login >}} -- [x] You are the model owner. -- [x] You are a [{{< fa code >}} Developer]{.bubble} or assigned another role with sufficient permissions to perform the tasks in this guide.[^1] - -::: - -## End-to-end workflow - -### In your modeling environment - -1. Build your model or your model predictions.[^2] - -2. Export the datasets and model or predictions. - -### With the {{< var validmind.developer >}} - -1. **From your modeling environment**, load the trained datasets and models or predictions. - -2. [Install and initialize the {{< var validmind.developer >}}.](install-and-initialize-validmind-library.qmd) - -3. Select the relevant tests. - -4. 
Review if all tests are covered by {{< var vm.product >}} or your external test provider: - - - **If all tests are NOT covered** — Create and register additional tests. - - **If all tests are covered** — - a. Run the selected tests. - b. Review your test results. - -### In the {{< var validmind.platform >}} - -1. **After installing and initalizing the {{< var validmind.developer >}}**,[^4] add content blocks[^3] to your model documentation: - - Select the block type: - - - **For test-driven blocks** — Select from available test provider results[^5] - - **For text blocks** — - - a. For new block: - 1. Add new editable text content block - 2. Review and collaborate on the content block - b. For existing blocks: Select from available texts from content provider - -2. [Submit your model documentation for review](/guide/model-documentation/submit-for-approval.qmd). - -## What's next - -- [Store model credentials in `.env` files](store-credentials-in-env-file.qmd) -- [Work with test results](work-with-test-results.qmd) - - - - -[^1]: [Manage permissions](/guide/configuration/manage-permissions.qmd) - -[^2]: - - **No available model?**
- You can still run tests and log documentation with {{< var vm.product >}} as long as you're able to [load the model predictions](/faq/faq-documentation.qmd#can-i-run-tests-and-log-documentation-without-having-a-model-available). - -[^3]: [Work with content blocks](/guide/model-documentation/work-with-content-blocks.qmd) - -[^4]: [Install and initialize the {{< var validmind.developer >}}](install-and-initialize-validmind-library.qmd) - -[^5]: [Work with test results](work-with-test-results.qmd) diff --git a/site/developer/model-documentation/documenting-models.qmd b/site/developer/model-documentation/documenting-models.qmd deleted file mode 100644 index 7454a67e39..0000000000 --- a/site/developer/model-documentation/documenting-models.qmd +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "Documenting models" -date: last-modified -listing: - - id: generate-documentation - type: grid - grid-columns: 2 - max-description-length: 250 - sort: false - fields: [title, description] - contents: - - document-models.qmd - - install-and-initialize-validmind-library.qmd - - work-with-test-results.qmd - - store-credentials-in-env-file.qmd -aliases: - - generating-model-documentation.html - - generate-model-documentation.html - - ../../guide/generate-model-documentation.html ---- - -Use the {{< var vm.developer >}} to generate model documentation, view the results and refine your documentation in the {{< var vm.platform >}}, and collaborate with your team to complete documentation and validation initiatives within the same interface. - -## How do I use the {{< var validmind.developer >}}? - -A typical high-level workflow for model developers consists of four major steps: - -```{mermaid} -%%| fig-align: center -graph LR - A[Develop
model] --> B[Generate model
documentation] - B --> C[Refine model
documentation] - C --> D[Submit for review] - C --> B -``` -
- -#### 1. Develop your model[^1] - -In your existing developer environment, build one or more candidate models that need to be validated. This step includes all the usual activities you already follow as a model developer. - -#### 2. Generate model documentation - -With the {{< var validmind.developer >}}, generate automated model documentation and run validation tests. This step includes making use of the automation and testing functionality provided by the {{< var vm.developer >}} and uploading the output to the {{< var vm.platform >}}. You can iteratively regenerate the documentation as you work though the next step of refining your documentation. - -#### 3. Refine model documentation -In the {{< var validmind.platform >}}, review the generated documentation and test output. Iterate over the documentation and test output to refine your model documentation. Collaborate with other developers and model validators to finalize the model documentation and get it ready for review. - -#### 4. Submit for review -In the {{< var validmind.platform >}}, you submit the model documentation for review which moves the documentation workflow moves to the next phase where a model validator will review it. - -Before you can use the {{< var validmind.developer >}}, you need to verify that the current documentation template contains all the necessary tests for the model you are developing: - -- The template might already be sufficient and you only need to run the template within the {{< var vm.developer >}} to populate documentation. -- Or, more likely, the template might need additional tests that you can add these tests via the {{< var vm.developer >}}. - -## How do I generate documentation? - -This process of verifying the suitability of the the current documentation template and adding more tests to the template is an iterative process: - -```{mermaid} -%%| fig-align: center -graph LR - A[Verify template] --> B[Build template] - B --> D[Add tests and
content blocks] - D --> E[Add external
test providers] - E --> C[Run template] - C --> B - -``` -
- -#### Build the template - -When the documentation template requires more tests to be added, or if the documentation template does not include a specific content or test block you need: - - - **For functionality provided by the {{< var validmind.developer >}}** — [Add the relevant tests or content blocks](/guide/model-documentation/work-with-content-blocks.qmd) for the model use case. - - **For tests not provided by the {{< var vm.developer >}}** — [Add your own external test provider](/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb). - - -#### Run the template - -When you have registered all the required tests as content blocks in the documentation template, populate the necessary model documentation by adding this call to your model: - - ```python - run_documentation_tests() - ``` - -::: {.callout-important} -## {{< var vm.product >}} may not support all potential use cases or provide a universally applicable documentation template. - Typically, you initiate the process of putting {{< var vm.product >}} into production by constructing a template specific for your own use case and then refine your model documentation. -::: - -## What's next - -:::{#generate-documentation} -::: - - - - -[^1]: - - **No available model?**
- You can still run tests and log documentation with {{< var vm.product >}} as long as you're able to [load the model predictions](/faq/faq-documentation.qmd#no-available-model). \ No newline at end of file diff --git a/site/developer/model-documentation/install-and-initialize-validmind-library.qmd b/site/developer/model-documentation/install-and-initialize-validmind-library.qmd index 5a671db5b5..5fe6434375 100644 --- a/site/developer/model-documentation/install-and-initialize-validmind-library.qmd +++ b/site/developer/model-documentation/install-and-initialize-validmind-library.qmd @@ -4,8 +4,8 @@ title: "Install and initialize the ValidMind Library" date: last-modified aliases: - install-and-initialize-validmind.html - - ../../guide/install-and-initialize-validmind-library.html - - ../../guide/install-and-initialize-developer-framework.html + - /guide/install-and-initialize-validmind-library.html + - /guide/install-and-initialize-developer-framework.html - install-and-initialize-client-library.html --- diff --git a/site/developer/model-documentation/store-credentials-in-env-file.qmd b/site/developer/model-documentation/store-credentials-in-env-file.qmd index c9ceeaf93b..82529d0235 100644 --- a/site/developer/model-documentation/store-credentials-in-env-file.qmd +++ b/site/developer/model-documentation/store-credentials-in-env-file.qmd @@ -2,7 +2,7 @@ title: "Store model credentials in `.env` files" date: last-modified aliases: - - ../../guide/store-credentials-in-env-file.html + - /guide/store-credentials-in-env-file.html --- Learn how to store model identifier credentials in a `.env` file instead of using inline credentials. This topic is relevant for model developers who want to follow best practices for security when running notebooks. 
@@ -116,7 +116,7 @@ vm.init( ## What's next - [Working with model documentation](/guide/model-documentation/working-with-model-documentation.qmd) -- [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +- [{{< var validmind.developer >}}](/developer/validmind-library.qmd) diff --git a/site/developer/model-testing/test-descriptions.qmd b/site/developer/model-testing/test-descriptions.qmd index 7a0e710198..fe4f56aea1 100644 --- a/site/developer/model-testing/test-descriptions.qmd +++ b/site/developer/model-testing/test-descriptions.qmd @@ -2,7 +2,7 @@ title: "Test descriptions" date: last-modified aliases: - - ../../guide/test-descriptions.html + - /guide/test-descriptions.html listing: - id: data-validation contents: "../../tests/data_validation/*.md" diff --git a/site/developer/model-testing/test-sandbox.qmd b/site/developer/model-testing/test-sandbox.qmd index c52c58e869..8b9247ba83 100644 --- a/site/developer/model-testing/test-sandbox.qmd +++ b/site/developer/model-testing/test-sandbox.qmd @@ -2,7 +2,7 @@ title: "Test sandbox [beta]{.smallcaps}" date: last-modified aliases: - - ../../guide/test-sandbox.html + - /guide/test-sandbox.html --- - -![The two main components of {{< var vm.product >}}: the {{< var validmind.developer >}} that integrates with your existing developer environment, and the {{< var validmind.platform >}}](/get-started/validmind-lifecycle.jpg){width=70% fig-alt="An image showing the two main components of ValidMind: the ValidMind Library that integrates with your existing developer environment, and the ValidMind Platform"} - {{< var vm.product >}} offers two primary methods for automating model documentation: - **Generate documentation** — Through automation, the {{< var vm.developer >}} extracts metadata from associated datasets and models for you and generates model documentation based on a template. 
You can also add more documentation and tests manually using the documentation editing capabilities in the {{< var validmind.platform >}}. @@ -65,7 +82,7 @@ The {{< var validmind.developer >}} provides a rich collection of documentation The {{< var validmind.developer >}} is designed to be model agnostic. If your model is built in Python, the {{< var vm.developer >}} provides all the standard functionality you may need without requiring you to rewrite any functions. -::: {.callout-important collapse="false" appearance="minimal"} +::: {.callout-important collapse="true" appearance="minimal"} ## {{< fa building-columns >}} Key {{< var vm.product >}} concepts @@ -74,17 +91,18 @@ The {{< var validmind.developer >}} is designed to be model agnostic. If your mo ::: -## Getting started - -::: {.grid} -::: {.g-col-8} -{{< video https://youtu.be/rIR8Mql7eGs title='ValidMind QuickStart' >}} -::: +## QuickStart + +After you [**sign up**](/guide/configuration/accessing-validmind.qmd) for {{< var vm.product >}} to get access, try our QuickStart: + +:::{#library-quickstart} ::: -After you [**sign up**]({{< var url.us1 >}}) for {{< var vm.product >}} to get access, try one of our getting started guides: +## {{< var vm.product >}} for model development -:::{#developer-getting-started} +Learn how to use ValidMind for your end-to-end model documentation process based on common model development scenarios with our *ValidMind for model development* series of four introductory notebooks: + +:::{#model-development} ::: @@ -104,7 +122,7 @@ The {{< var validmind.developer >}} provides many built-in tests and test suites :::: -:::{#developer-how-to-beginner} +:::{#run-tests} ::: ## Try the code samples @@ -117,30 +135,30 @@ Our code samples showcase the capabilities of the {{< var validmind.developer >} ::: ::: {.w-20-ns .tc} -[Code samples](samples-jupyter-notebooks.qmd){.button .button-green} +[All code samples](samples-jupyter-notebooks.qmd){.button .button-green} ::: :::: 
-:::{#developer-code-samples} +:::{#code-samples} ::: -## Document models +## Work with model documentation :::: {.flex .flex-wrap .justify-around} -::: {.w-80-ns} +::: {.w-70-ns} After you have tried out the {{< var validmind.developer >}}, continue working with your model documentation in the {{< var validmind.platform >}}: ::: -::: {.w-20-ns .tc} -[Supported models](model-documentation/supported-models.qmd){.button .button-green} +::: {.w-30-ns .tc} +[Working with model documentation](/guide/model-documentation/working-with-model-documentation.qmd){.button .button-green} ::: :::: -:::{#document-models} +:::{#library-documentation} ::: \ No newline at end of file diff --git a/site/faq/_faq-model-updates.qmd b/site/faq/_faq-model-updates.qmd index 0cbf463fab..93d1bc917f 100644 --- a/site/faq/_faq-model-updates.qmd +++ b/site/faq/_faq-model-updates.qmd @@ -1,5 +1,5 @@ ## How does {{< var vm.product >}} manage updates to models? -1. {{< var vm.product >}} allows model developers to re-run documentation functions with the {{< var validmind.developer >}}^[[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd)] to capture changes in the model, such as changes in the number of features or hyperparameters. +1. {{< var vm.product >}} allows model developers to re-run documentation functions with the {{< var validmind.developer >}}^[[{{< var validmind.developer >}}](/developer/validmind-library.qmd)] to capture changes in the model, such as changes in the number of features or hyperparameters. 2. After a model developer has made a change in their development environment, such as to a Jupyter Notebook,^[[Code samples](/developer/samples-jupyter-notebooks.qmd)] they can execute the relevant {{< var vm.product >}} documentation function to update the corresponding documentation section. 3. {{< var vm.product >}} will then automatically recreate the relevant figures and tables and update them in the online documentation. 
\ No newline at end of file diff --git a/site/faq/faq-collaboration.qmd b/site/faq/faq-collaboration.qmd index 7583fa1851..0474404f0f 100644 --- a/site/faq/faq-collaboration.qmd +++ b/site/faq/faq-collaboration.qmd @@ -2,7 +2,7 @@ title: "Collaboration" date: last-modified aliases: - - ../guide/faq-workflows.html + - /guide/faq-workflows.html listing: - id: faq-collaboration type: grid diff --git a/site/faq/faq-documentation.qmd b/site/faq/faq-documentation.qmd index b408c0c41d..18e83124e5 100644 --- a/site/faq/faq-documentation.qmd +++ b/site/faq/faq-documentation.qmd @@ -2,7 +2,7 @@ title: "Model documentation and templates" date: last-modified aliases: - - ../guide/faq-documentation.html + - /guide/faq-documentation.html listing: - id: faq-documentation type: grid @@ -99,11 +99,11 @@ These features are currently on the roadmap and under research, no release sched [^5]: [Manage permissions](/guide/configuration/manage-permissions.qmd) -[^6]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^6]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^7]: [Work with content blocks](/guide/model-documentation/work-with-content-blocks.qmd) -[^8]: [Work with test results](/developer/model-documentation/work-with-test-results.qmd) +[^8]: [Work with test results](/guide/model-documentation/work-with-test-results.qmd) [^9]: [`assign_predictions()`](/validmind/validmind/vm_models.html#VMDataset.assign_predictions) diff --git a/site/faq/faq-integrations.qmd b/site/faq/faq-integrations.qmd index 169aa22997..7eaf95c1e3 100644 --- a/site/faq/faq-integrations.qmd +++ b/site/faq/faq-integrations.qmd @@ -2,7 +2,7 @@ title: "Integrations and support" date: last-modified aliases: - - ../guide/faq-integrations.html + - /guide/faq-integrations.html listing: - id: faq-integrations type: grid @@ -11,7 +11,7 @@ listing: sort: false fields: [title, description] contents: - - 
../developer/model-documentation/supported-models.qmd + - ../developer/supported-models.qmd - ../about/overview-llm-features.qmd - ../about/deployment/deployment-options.qmd --- @@ -76,9 +76,9 @@ We will be implementing connector interfaces allowing extraction of relationship -[^1]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^1]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) -[^2]: [Supported modeling libraries and other tools](/developer/model-documentation/supported-models.qmd#supported-modeling-libraries-and-other-tools) +[^2]: [Supported modeling libraries and other tools](/developer/supported-models.qmd#supported-modeling-libraries-and-other-tools) [^3]: [Matplotlib](https://matplotlib.org/) diff --git a/site/faq/faq-inventory.qmd b/site/faq/faq-inventory.qmd index 48a0b36c93..b233b959ff 100644 --- a/site/faq/faq-inventory.qmd +++ b/site/faq/faq-inventory.qmd @@ -2,7 +2,7 @@ title: "Model inventory and activity" date: last-modified aliases: - - ../guide/faq-inventory.html + - /guide/faq-inventory.html listing: - id: faq-inventory type: grid diff --git a/site/faq/faq-privacy.qmd b/site/faq/faq-privacy.qmd index d2c0662a0b..a8730d0717 100644 --- a/site/faq/faq-privacy.qmd +++ b/site/faq/faq-privacy.qmd @@ -2,9 +2,9 @@ title: "Data handling and privacy" date: last-modified aliases: - - ../guide/faq-privacy.html + - /guide/faq-privacy.html - faq-data-handling.html - - ../guide/faq-data-handling.html + - /guide/faq-data-handling.html listing: - id: faq-privacy type: grid diff --git a/site/faq/faq-testing.qmd b/site/faq/faq-testing.qmd index 3f4f195bc3..34ae2a4af1 100644 --- a/site/faq/faq-testing.qmd +++ b/site/faq/faq-testing.qmd @@ -2,7 +2,7 @@ title: "Testing" date: last-modified aliases: - - ../guide/faq-testing.html + - /guide/faq-testing.html listing: - id: faq-testing type: grid @@ -57,7 +57,7 @@ In addition to custom tests, you can also add use case and test-specific context 
-[^1]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^1]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^2]: [Accessing {{< var vm.product >}}](/guide/configuration/accessing-validmind.qmd) diff --git a/site/faq/faq-validation.qmd b/site/faq/faq-validation.qmd index 74e4c1c959..d43ed50f9d 100644 --- a/site/faq/faq-validation.qmd +++ b/site/faq/faq-validation.qmd @@ -2,7 +2,7 @@ title: "Model validation and findings" date: last-modified aliases: - - ../guide/faq-documentation.html + - /guide/faq-documentation.html listing: - id: faq-validation type: grid diff --git a/site/faq/faq-workflows.qmd b/site/faq/faq-workflows.qmd index cdabc59090..4500a534ea 100644 --- a/site/faq/faq-workflows.qmd +++ b/site/faq/faq-workflows.qmd @@ -2,7 +2,7 @@ title: "Model workflows" date: last-modified aliases: - - ../guide/faq-workflows.html + - /guide/faq-workflows.html listing: - id: faq-workflows type: grid @@ -59,4 +59,4 @@ You do not need to use the {{< var validmind.platform >}} while you are in the e [^2]: [Manage permissions](/guide/configuration/manage-permissions.qmd) -[^3]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) \ No newline at end of file +[^3]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) \ No newline at end of file diff --git a/site/faq/faq.qmd b/site/faq/faq.qmd index fe1f439ef6..0f1eb553c3 100644 --- a/site/faq/faq.qmd +++ b/site/faq/faq.qmd @@ -2,7 +2,7 @@ title: "FAQ" date: last-modified aliases: - - ../guide/faq.html + - /guide/faq.html listing: - id: faq type: default @@ -47,8 +47,4 @@ listing: Find answers to frequently asked questions (FAQs), grouped by topic: :::{#faq} -::: - -{{< include /training/_training-support.qmd >}} - -{{< include /support/_submit-feedback.qmd >}} \ No newline at end of file +::: \ No newline at end of file diff --git 
a/site/get-started/developer/try-in-your-own-environment.qmd b/site/get-started/developer/try-in-your-own-environment.qmd index 86e7c765d9..d852a35bc3 100644 --- a/site/get-started/developer/try-in-your-own-environment.qmd +++ b/site/get-started/developer/try-in-your-own-environment.qmd @@ -2,7 +2,7 @@ title: "Try it in your own developer environment" date: last-modified aliases: - - ../../guide/quickstart-try-developer-framework-in-your-own-developer-environment.html + - /guide/quickstart-try-developer-framework-in-your-own-developer-environment.html --- Learn how to document a model with {{< var vm.product >}} locally in your own developer environment. You can either clone our open-source repository or download the code samples to run the notebook. diff --git a/site/get-started/developer/try-with-colab.qmd b/site/get-started/developer/try-with-colab.qmd index f99933d050..a19876be15 100644 --- a/site/get-started/developer/try-with-colab.qmd +++ b/site/get-started/developer/try-with-colab.qmd @@ -2,7 +2,7 @@ title: "Try it with Google Colaboratory" date: last-modified aliases: - - ../../guide/quickstart-try-developer-framework-with-colab.html + - /guide/quickstart-try-developer-framework-with-colab.html --- Learn how to document a model with {{< var vm.product >}} on Google Colaboratory. diff --git a/site/get-started/developer/try-with-jupyterhub.qmd b/site/get-started/developer/try-with-jupyterhub.qmd index a3c1df8708..e869e577f7 100644 --- a/site/get-started/developer/try-with-jupyterhub.qmd +++ b/site/get-started/developer/try-with-jupyterhub.qmd @@ -2,7 +2,7 @@ title: "Try it with JupyterHub (recommended)" date: last-modified aliases: - - ../../guide/quickstart-try-developer-framework-with-jupyterhub.html + - /guide/quickstart-try-developer-framework-with-jupyterhub.html --- Learn how to document a model with {{< var vm.product >}} on JupyterHub. 
diff --git a/site/get-started/get-started.qmd b/site/get-started/get-started.qmd index 39c4fef019..740ec85d44 100644 --- a/site/get-started/get-started.qmd +++ b/site/get-started/get-started.qmd @@ -2,7 +2,7 @@ title: "Get started" date: last-modified aliases: - - ../guide/get-started.html + - /guide/get-started.html listing: id: next-steps type: grid diff --git a/site/get-started/platform/explore-sample-model-documentation.qmd b/site/get-started/platform/explore-sample-model-documentation.qmd index aba3bbe4d8..3796770461 100644 --- a/site/get-started/platform/explore-sample-model-documentation.qmd +++ b/site/get-started/platform/explore-sample-model-documentation.qmd @@ -2,7 +2,7 @@ title: "Explore sample model documentation" date: last-modified aliases: - - ../../guide/quickstart-explore-sample-model-documentation.html + - /guide/quickstart-explore-sample-model-documentation.html --- First, let's take a look at how the {{< var vm.product >}} handles model documentation. The best place to start is with the {{< var validmind.platform >}}. 
@@ -107,7 +107,7 @@ Continue with [Register your first model](register-your-first-model.qmd) to lear [^7]: [View model activity](/guide/model-inventory/view-model-activity.qmd) -[^8]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^8]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^9]: [Working with documentation templates](/guide/model-documentation/working-with-documentation-templates.qmd) diff --git a/site/get-started/platform/generate-documentation-for-your-model.qmd b/site/get-started/platform/generate-documentation-for-your-model.qmd index c5001c5b55..2baddf13e7 100644 --- a/site/get-started/platform/generate-documentation-for-your-model.qmd +++ b/site/get-started/platform/generate-documentation-for-your-model.qmd @@ -2,7 +2,7 @@ title: "Generate documentation for your model" date: last-modified aliases: - - ../../guide/quickstart-generate-documentation-for-your-model.html + - /guide/quickstart-generate-documentation-for-your-model.html --- You are now ready to modify the QuickStart notebook you used earlier and run it to generate model documentation and test result with the {{< var validmind.developer >}}. The resulting artifacts are then uploaded to your model documentation in the {{< var validmind.platform >}}. 
diff --git a/site/get-started/platform/next-steps.qmd b/site/get-started/platform/next-steps.qmd index b3ec4a6151..3b482f0ae6 100644 --- a/site/get-started/platform/next-steps.qmd +++ b/site/get-started/platform/next-steps.qmd @@ -2,7 +2,7 @@ title: "Next steps" date: last-modified aliases: - - ../../guide/next-steps.html + - /guide/next-steps.html listing: - id: next-admin type: grid @@ -19,7 +19,7 @@ listing: sort: false fields: [title, description] contents: - - ../../developer/get-started-validmind-library.qmd + - ../../developer/validmind-library.qmd - ../../developer/samples-jupyter-notebooks.qmd - ../../guide/guides.qmd - id: next-validators diff --git a/site/get-started/platform/register-your-first-model.qmd b/site/get-started/platform/register-your-first-model.qmd index f44bdf077b..689c99270c 100644 --- a/site/get-started/platform/register-your-first-model.qmd +++ b/site/get-started/platform/register-your-first-model.qmd @@ -2,7 +2,7 @@ title: "Register your first model" date: last-modified aliases: - - ../../guide/quickstart-register-your-first-model.html + - /guide/quickstart-register-your-first-model.html --- To be able to document models with the {{< var validmind.platform >}}, you need to first register them in the model inventory. Let's show you how. diff --git a/site/get-started/quickstart.qmd b/site/get-started/quickstart.qmd index 91b37a21aa..7773ec434c 100644 --- a/site/get-started/quickstart.qmd +++ b/site/get-started/quickstart.qmd @@ -2,7 +2,7 @@ title: "QuickStart" date: last-modified aliases: - - ../guide/quickstart.html + - /guide/quickstart.html --- The easiest way to get started with {{< var vm.product >}} is to try out our {{< var vm.developer >}} in JupyterHub and to explore the {{< var validmind.platform >}} online. 
diff --git a/site/guide/configuration/configure-aws-privatelink.qmd b/site/guide/configuration/configure-aws-privatelink.qmd index c78455530b..0696835db0 100644 --- a/site/guide/configuration/configure-aws-privatelink.qmd +++ b/site/guide/configuration/configure-aws-privatelink.qmd @@ -2,7 +2,7 @@ title: "Configure AWS PrivateLink" date: last-modified aliases: - - ../configure-aws-privatelink.html + - /guide/configure-aws-privatelink.html --- To keep your network traffic private and minimize its attack surface, configure AWS PrivateLink[^1] to establish a private connection between {{< var vm.product >}} and your company network. diff --git a/site/guide/configuration/configure-azure-private-link.qmd b/site/guide/configuration/configure-azure-private-link.qmd index 22db3a2395..688dcda9fd 100644 --- a/site/guide/configuration/configure-azure-private-link.qmd +++ b/site/guide/configuration/configure-azure-private-link.qmd @@ -2,7 +2,7 @@ title: "Configure Azure Private Link" date: last-modified aliases: - - ../configure-aws-privatelink.html + - /guide/configure-azure-private-link.html --- To keep your network traffic private and minimize its attack surface, configure Azure Private Link[^1] to establish a private connection between {{< var vm.product >}} and your company network. diff --git a/site/guide/configuration/manage-groups.qmd b/site/guide/configuration/manage-groups.qmd index 8de92ad6d0..c1ae90e306 100644 --- a/site/guide/configuration/manage-groups.qmd +++ b/site/guide/configuration/manage-groups.qmd @@ -2,7 +2,7 @@ title: "Manage groups" date: last-modified aliases: - - ../manage-groups.html + - /guide/manage-groups.html --- Groups are segments of users with the ability to view models associated with that group. Access to granular features in the {{< var validmind.platform >}} within a group's set of models is further defined by roles and permissions. 
diff --git a/site/guide/configuration/manage-permissions.qmd b/site/guide/configuration/manage-permissions.qmd index 4a1b500fc5..79c45690cd 100644 --- a/site/guide/configuration/manage-permissions.qmd +++ b/site/guide/configuration/manage-permissions.qmd @@ -3,7 +3,7 @@ title: "Manage permissions" date: last-modified tbl-cap-location: bottom aliases: - - ../manage-permissions.html + - /guide/manage-permissions.html --- Permissions dictate user access controls within the {{< var validmind.platform >}}, and are associated with specific roles. Assign granular permissions to roles according to your organization's custom requirements. diff --git a/site/guide/configuration/manage-roles.qmd b/site/guide/configuration/manage-roles.qmd index 9b1edc6101..007d501cde 100644 --- a/site/guide/configuration/manage-roles.qmd +++ b/site/guide/configuration/manage-roles.qmd @@ -1,7 +1,7 @@ --- title: "Manage roles" aliases: - - ../manage-roles.html + - /guide/manage-roles.html --- Roles are a named set of permissions that determine your users' access to features within the {{< var vm.platform >}} based on your organization's structure. Create and update roles, and verify that each role has appropriate access to the resources they need. diff --git a/site/guide/configuration/manage-users.qmd b/site/guide/configuration/manage-users.qmd index 1fda886600..00c8a90592 100644 --- a/site/guide/configuration/manage-users.qmd +++ b/site/guide/configuration/manage-users.qmd @@ -2,7 +2,7 @@ title: "Manage users" date: last-modified aliases: - - ../manage-users.html + - /guide/manage-users.html --- Invite new users and review your current users from within the user directory. Users belong to groups which determine which models they can see, and have roles with attached permissions which define the level of access they have to features. 
diff --git a/site/guide/configuration/managing-users.qmd b/site/guide/configuration/managing-users.qmd index b8c63e73bd..d7b300deec 100644 --- a/site/guide/configuration/managing-users.qmd +++ b/site/guide/configuration/managing-users.qmd @@ -15,7 +15,7 @@ listing: - ./manage-roles.qmd - ./manage-permissions.qmd aliases: - - ../onboarding-users.html + - /guide/onboarding-users.html --- Control and organize who has access to the {{< var validmind.platform >}} and what features they're able to use. Invite users, create user groups and roles, and assign roles to users and permissions to your roles. diff --git a/site/guide/configuration/set-up-your-organization.qmd b/site/guide/configuration/set-up-your-organization.qmd index cda0fbd07d..ca43ea4658 100644 --- a/site/guide/configuration/set-up-your-organization.qmd +++ b/site/guide/configuration/set-up-your-organization.qmd @@ -2,7 +2,7 @@ title: "Set up your organization" date: last-modified aliases: - - ../set-up-your-organization.html + - /guide/set-up-your-organization.html --- This task involves managing organizations within {{< var vm.product >}}, allowing for effective business unit control. 
diff --git a/site/guide/guides.qmd b/site/guide/guides.qmd index 991a80a9ab..c497735145 100644 --- a/site/guide/guides.qmd +++ b/site/guide/guides.qmd @@ -57,7 +57,7 @@ listing: sort: false fields: [title, description] contents: - - ../developer/get-started-validmind-library.qmd + - ../developer/validmind-library.qmd - ../developer/samples-jupyter-notebooks.qmd - id: guides-model-validation type: grid diff --git a/site/guide/model-documentation/assign-documentation-section-statuses.qmd b/site/guide/model-documentation/assign-documentation-section-statuses.qmd index 8ba7f626bc..7f573b8f97 100644 --- a/site/guide/model-documentation/assign-documentation-section-statuses.qmd +++ b/site/guide/model-documentation/assign-documentation-section-statuses.qmd @@ -2,7 +2,7 @@ title: "Assign documentation section statuses" date: last-modified aliases: - - ../assign-documentation-section-statuses.html + - /guide/assign-documentation-section-statuses.html --- Assign a completion status to individual sections of your model documentation that will be reflected in your Document Overview. diff --git a/site/guide/model-documentation/collaborate-with-others.qmd b/site/guide/model-documentation/collaborate-with-others.qmd index f4c4a95663..3b8988ae2c 100644 --- a/site/guide/model-documentation/collaborate-with-others.qmd +++ b/site/guide/model-documentation/collaborate-with-others.qmd @@ -2,8 +2,8 @@ title: "Collaborate with others" date: last-modified aliases: - - ../collaborate-on-documentation-projects.html - - ../collborate-with-others.html + - /guide/collaborate-on-documentation-projects.html + - /guide/collaborate-with-others.html --- Use the real-time collaboration features to track changes, add comments, and access the revision history for model documentation and validation reports. 
diff --git a/site/guide/model-documentation/customize-documentation-templates.qmd b/site/guide/model-documentation/customize-documentation-templates.qmd index 22bf61e2ab..d6d75f56a3 100644 --- a/site/guide/model-documentation/customize-documentation-templates.qmd +++ b/site/guide/model-documentation/customize-documentation-templates.qmd @@ -2,7 +2,7 @@ title: "Customize documentation templates" date: last-modified aliases: - - ../customize-documentation-templates.html + - /guide/customize-documentation-templates.html --- {{< var vm.product >}} offers robust and fully customizable templates for model documentation and validation reports. Configure these templates via multiple methods to suit your organization's unique needs. diff --git a/site/guide/model-documentation/export-documentation.qmd b/site/guide/model-documentation/export-documentation.qmd index aedd395c1a..b78ffc4dd0 100644 --- a/site/guide/model-documentation/export-documentation.qmd +++ b/site/guide/model-documentation/export-documentation.qmd @@ -2,7 +2,7 @@ title: "Export documentation" date: last-modified aliases: - - ../export-documentation.html + - /guide/export-documentation.html --- Export your model documentation or validation reports as Microsoft Word files (`.docx`) for use outside of the {{< var validmind.platform >}}. diff --git a/site/guide/model-documentation/submit-for-approval.qmd b/site/guide/model-documentation/submit-for-approval.qmd index 8877924fa0..82f6ee26ca 100644 --- a/site/guide/model-documentation/submit-for-approval.qmd +++ b/site/guide/model-documentation/submit-for-approval.qmd @@ -2,7 +2,7 @@ title: "Submit for approval" date: last-modified aliases: - - ../submit-for-approval.html + - /guide/submit-for-approval.html --- When you're ready, verify your model's status, and then submit your model documentation or validation report for approval. 
diff --git a/site/guide/model-documentation/swap-documentation-templates.qmd b/site/guide/model-documentation/swap-documentation-templates.qmd index 1789a12965..2529a5505c 100644 --- a/site/guide/model-documentation/swap-documentation-templates.qmd +++ b/site/guide/model-documentation/swap-documentation-templates.qmd @@ -2,7 +2,7 @@ title: "Swap documentation templates" date: last-modified aliases: - - ../swap-documentation-templates.html + - /guide/swap-documentation-templates.html --- Swap between different versions of your model documentation or validation report templates within the {{< var validmind.platform >}}. Switch to a completely different template, or apply another version of your current template. diff --git a/site/developer/model-documentation/test-driven-block-menu.png b/site/guide/model-documentation/test-driven-block-menu.png similarity index 100% rename from site/developer/model-documentation/test-driven-block-menu.png rename to site/guide/model-documentation/test-driven-block-menu.png diff --git a/site/developer/model-documentation/test-run-details.gif b/site/guide/model-documentation/test-run-details.gif similarity index 100% rename from site/developer/model-documentation/test-run-details.gif rename to site/guide/model-documentation/test-run-details.gif diff --git a/site/guide/model-documentation/view-documentation-guidelines.qmd b/site/guide/model-documentation/view-documentation-guidelines.qmd index abea197fe5..d40b763e00 100644 --- a/site/guide/model-documentation/view-documentation-guidelines.qmd +++ b/site/guide/model-documentation/view-documentation-guidelines.qmd @@ -2,7 +2,7 @@ title: "View documentation guidelines" date: last-modified aliases: - - ../view-documentation-guidelines.html + - /guide/view-documentation-guidelines.html --- View the guidelines for model documentation associated with a template to ensure that you are compliant with documentation requirements. 
@@ -38,7 +38,7 @@ By default, the [{{< fa hand >}} Customer Admin]{.bubble} role has these permiss ## What's next -- [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +- [{{< var validmind.developer >}}](/developer/validmind-library.qmd) - [Working with model documentation](working-with-model-documentation.qmd) - [Collaborate on documentation](collaborate-with-others.qmd) @@ -49,7 +49,7 @@ By default, the [{{< fa hand >}} Customer Admin]{.bubble} role has these permiss [^2]: [Working with model documentation](working-with-model-documentation.qmd) -[^3]: [{{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^3]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^4]: [Manage permissions](/guide/configuration/manage-permissions.qmd) diff --git a/site/guide/model-documentation/view-documentation-templates.qmd b/site/guide/model-documentation/view-documentation-templates.qmd index 14a7da0f1d..943b9ac2e3 100644 --- a/site/guide/model-documentation/view-documentation-templates.qmd +++ b/site/guide/model-documentation/view-documentation-templates.qmd @@ -2,7 +2,7 @@ title: "View documentation templates" date: last-modified aliases: - - ../view-documentation-templates.html + - /guide/view-documentation-templates.html --- View the structure and configuration of existing documentation templates within the {{< var vm.platform >}}. {{< var vm.product >}} provides default templates for documentation, validation reports, and ongoing monitoring plans. 
@@ -45,7 +45,7 @@ View the structure and configuration of existing documentation templates within ## What's next -- [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +- [{{< var validmind.developer >}}](/developer/validmind-library.qmd) - [Collaborate on documentation](collaborate-with-others.qmd) diff --git a/site/guide/model-documentation/work-with-content-blocks.qmd b/site/guide/model-documentation/work-with-content-blocks.qmd index a203c7ce2d..2bbcb38ce4 100644 --- a/site/guide/model-documentation/work-with-content-blocks.qmd +++ b/site/guide/model-documentation/work-with-content-blocks.qmd @@ -2,7 +2,7 @@ title: "Work with content blocks" date: last-modified aliases: - - ../work-with-content-blocks.html + - /guide/work-with-content-blocks.html --- Make edits to your model documentation, validation reports, or ongoing monitoring plans by adding or removing content blocks directly in the online editor. @@ -157,7 +157,7 @@ Test-driven or metric over time blocks can be re-added later on but **text block [^4]: [Manage permissions](/guide/configuration/manage-permissions.qmd) -[^5]: [Work with test results](/developer/model-documentation/work-with-test-results.qmd) +[^5]: [Work with test results](/guide/model-documentation/work-with-test-results.qmd) [^6]: [Work with metrics over time](/guide/monitoring/work-with-metrics-over-time.qmd) diff --git a/site/developer/model-documentation/work-with-test-results.qmd b/site/guide/model-documentation/work-with-test-results.qmd similarity index 96% rename from site/developer/model-documentation/work-with-test-results.qmd rename to site/guide/model-documentation/work-with-test-results.qmd index 8552d1cc3a..c68ab9ad47 100644 --- a/site/developer/model-documentation/work-with-test-results.qmd +++ b/site/guide/model-documentation/work-with-test-results.qmd @@ -1,6 +1,8 @@ --- title: "Work with test results" date: last-modified +aliases: + - 
/developer/model-documentation/work-with-test-results.html --- Once generated via the {{< var validmind.developer >}}, view and add the test results to your documentation in the {{< var validmind.platform >}}. @@ -95,7 +97,7 @@ Filters can be removed from the list of test result metadata by clicking on the -[^1]: [Document models](document-models.qmd) +[^1]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^2]: [Manage permissions](/guide/configuration/manage-permissions.qmd) diff --git a/site/guide/model-documentation/working-with-documentation-templates.qmd b/site/guide/model-documentation/working-with-documentation-templates.qmd index aa5d28115c..0c802b06de 100644 --- a/site/guide/model-documentation/working-with-documentation-templates.qmd +++ b/site/guide/model-documentation/working-with-documentation-templates.qmd @@ -13,7 +13,7 @@ listing: - customize-documentation-templates.qmd - swap-documentation-templates.qmd aliases: - - ../working-with-documentation-templates.html + - /guide/working-with-documentation-templates.html --- Documentation templates offer a standardized approach to creating consistent and comprehensive model documentation and validation reports. You customize these templates to fit your specific case-by-case needs. 
diff --git a/site/guide/model-documentation/working-with-model-documentation.qmd b/site/guide/model-documentation/working-with-model-documentation.qmd index 29303e18ff..a95839680a 100644 --- a/site/guide/model-documentation/working-with-model-documentation.qmd +++ b/site/guide/model-documentation/working-with-model-documentation.qmd @@ -10,11 +10,12 @@ listing: contents: - view-documentation-guidelines.qmd - work-with-content-blocks.qmd + - work-with-test-results.qmd - assign-documentation-section-statuses.qmd - collaborate-with-others.qmd - submit-for-approval.qmd aliases: - - ../working-with-model-documentation.html + - /guide/working-with-model-documentation.html --- After you upload initial model documentation through the {{< var vm.developer >}}, use the {{< var vm.platform >}} to make qualitative edits to model documentation, view guidelines, collaborate with validators, and submit your model documentation for approval. @@ -24,7 +25,7 @@ After you upload initial model documentation through the {{< var vm.developer >} This section describes how to work with model documentation in the {{< var validmind.platform >}}. Typically, you perform the tasks described here **after you have uploaded some initial model documentation** with the {{< var validmind.developer >}}. 
-[Get started with the {{< var validmind.developer >}} {{< fa hand-point-right>}}](/developer/get-started-validmind-library.qmd) +[{{< var validmind.developer >}} {{< fa hand-point-right>}}](/developer/validmind-library.qmd) ::: ::: {.attn} @@ -101,6 +102,6 @@ This section describes how to work with model documentation in the {{< var valid [^1]: [Register models in the inventory](/guide/model-inventory/register-models-in-inventory.qmd) -[^2]: [{{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^2]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^3]: [Manage permissions](/guide/configuration/manage-permissions.qmd) \ No newline at end of file diff --git a/site/guide/model-inventory/_view-model-activity-overview.qmd b/site/guide/model-inventory/_view-model-activity-overview.qmd index 613ea53a1f..f9121355b0 100644 --- a/site/guide/model-inventory/_view-model-activity-overview.qmd +++ b/site/guide/model-inventory/_view-model-activity-overview.qmd @@ -4,7 +4,7 @@ - Inventory fields updated on the model^[[Edit model inventory fields](/guide/model-inventory/edit-model-inventory-fields.qmd)] - Model status transitions^[[Working with model workflows](/guide/model-workflows/working-with-model-workflows.qmd)] - Updates to model documentation, validation reports,^[[Working with model documentation](/guide/model-documentation/working-with-model-documentation.qmd)] or ongoing monitoring plans^[[Ongoing monitoring](/guide/monitoring/ongoing-monitoring.qmd)] -- Test results added to your model^[[{{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd)] +- Test results added to your model^[[{{< var validmind.developer >}}](/developer/validmind-library.qmd)] - Findings added, updated, or removed^[[Working with model findings](/guide/model-validation/working-with-model-findings.qmd)] - Comment creation and replies^[[Collaborate with others 
(Commenting)](/guide/model-documentation/collaborate-with-others.qmd#commenting)] diff --git a/site/guide/model-inventory/customize-model-inventory-layout.qmd b/site/guide/model-inventory/customize-model-inventory-layout.qmd index 33293a82a5..23f1a0c08e 100644 --- a/site/guide/model-inventory/customize-model-inventory-layout.qmd +++ b/site/guide/model-inventory/customize-model-inventory-layout.qmd @@ -2,7 +2,7 @@ title: "Customize model inventory layout" date: last-modified aliases: - - ../customize-model-inventory-layout.html + - /guide/customize-model-inventory-layout.html --- Swap between inventory views or configure the information that displays by default on the model inventory. diff --git a/site/guide/model-inventory/edit-model-inventory-fields.qmd b/site/guide/model-inventory/edit-model-inventory-fields.qmd index 04c941ac8e..1bfe9fa42e 100644 --- a/site/guide/model-inventory/edit-model-inventory-fields.qmd +++ b/site/guide/model-inventory/edit-model-inventory-fields.qmd @@ -2,7 +2,7 @@ title: "Edit model inventory fields" date: last-modified aliases: - - ../edit-model-inventory-fields.html + - /guide/edit-model-inventory-fields.html --- Edit individual detail fields on a model to ensure that model details are accurate and up-to-date. diff --git a/site/guide/model-inventory/manage-model-inventory-fields.qmd b/site/guide/model-inventory/manage-model-inventory-fields.qmd index 7146567538..cef23efb46 100644 --- a/site/guide/model-inventory/manage-model-inventory-fields.qmd +++ b/site/guide/model-inventory/manage-model-inventory-fields.qmd @@ -3,7 +3,7 @@ title: "Manage model inventory fields" date: last-modified aliases: - manage-inventory-custom-fields.html - - ../manage-inventory-custom-fields.html + - /guide/manage-inventory-custom-fields.html --- Create and edit the fields that appear on all models in your model inventory. Choose from an array of field types with different properties and use cases. 
diff --git a/site/guide/model-inventory/register-models-in-inventory.qmd b/site/guide/model-inventory/register-models-in-inventory.qmd index 4a103e1e51..55ddfd6ccb 100644 --- a/site/guide/model-inventory/register-models-in-inventory.qmd +++ b/site/guide/model-inventory/register-models-in-inventory.qmd @@ -2,7 +2,7 @@ title: "Register models in the inventory" date: last-modified aliases: - - ../register-models-in-model-inventory.html + - /guide/register-models-in-model-inventory.html --- Register models with {{< var vm.product >}} as the first step towards streamlining your model documentation and validation workflow. diff --git a/site/guide/model-inventory/view-model-activity.qmd b/site/guide/model-inventory/view-model-activity.qmd index 064ad4a217..f459c7efbb 100644 --- a/site/guide/model-inventory/view-model-activity.qmd +++ b/site/guide/model-inventory/view-model-activity.qmd @@ -2,8 +2,8 @@ title: "View model activity" date: last-modified aliases: - - ../model-documentation/view-documentation-activity.html - - ../view-documentation-activity.html + - /guide/model-documentation/view-documentation-activity.html + - /guide/view-documentation-activity.html --- Use the audit trail functionality in the {{< var validmind.platform >}} to track or audit all the information events associated with a specific model. See a record of comments, workflow status changes, and updates made to the model. 
@@ -71,7 +71,7 @@ Shows test results logged for your model via the {{< var validmind.developer >}} [^1]: [Register models in the inventory](/guide/model-inventory/register-models-in-inventory.qmd) -[^2]: [{{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^2]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^3]: [Collaborate with others (Commenting)](/guide/model-documentation/collaborate-with-others.qmd#commenting) @@ -85,6 +85,6 @@ Shows test results logged for your model via the {{< var validmind.developer >}} [^8]: [Working with model findings](/guide/model-validation/working-with-model-findings.qmd) -[^9]: [Work with test results](/developer/model-documentation/work-with-test-results.qmd) +[^9]: [Work with test results](/guide/model-documentation/work-with-test-results.qmd) diff --git a/site/guide/model-inventory/working-with-model-inventory.qmd b/site/guide/model-inventory/working-with-model-inventory.qmd index 431979d8bf..3bdfa3f6d4 100644 --- a/site/guide/model-inventory/working-with-model-inventory.qmd +++ b/site/guide/model-inventory/working-with-model-inventory.qmd @@ -14,7 +14,7 @@ listing: - edit-model-inventory-fields.qmd - customize-model-overview-page.qmd aliases: - - ../working-with-model-inventory.html + - /guide/working-with-model-inventory.html --- Get started with the {{< var vm.product >}} model inventory, which tracks comprehensive details for all your models throughout the model lifecycle. The model inventory is customizable and extensible, with a layout that can be configured to suit each user's needs. 
diff --git a/site/guide/model-validation/_assess-compliance-developer-evidence.qmd b/site/guide/model-validation/_assess-compliance-developer-evidence.qmd index 9bcdd5e557..e5405f7bc2 100644 --- a/site/guide/model-validation/_assess-compliance-developer-evidence.qmd +++ b/site/guide/model-validation/_assess-compliance-developer-evidence.qmd @@ -1,5 +1,5 @@ :::: {.content-visible unless-format="revealjs"} -To link evidence logged by developers^[[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd)] to your validation report: +To link evidence logged by developers^[[{{< var validmind.developer >}}](/developer/validmind-library.qmd)] to your validation report: 1. In the left sidebar, click **{{< fa cubes >}} Inventory**. diff --git a/site/guide/model-validation/assess-compliance.qmd b/site/guide/model-validation/assess-compliance.qmd index 879eefe9c8..c87ade1078 100644 --- a/site/guide/model-validation/assess-compliance.qmd +++ b/site/guide/model-validation/assess-compliance.qmd @@ -2,7 +2,7 @@ title: "Assess compliance" date: last-modified aliases: - - ../assess-compliance.html + - /guide/assess-compliance.html --- Use the {{< var validmind.platform >}} to assess compliance of your models with guidelines based on analyzing evidence and findings. 
@@ -83,6 +83,6 @@ To link validator evidence to a report, you must first log tests as a validator [^3]: [Manage permissions](/guide/configuration/manage-permissions.qmd) -[^4]: [Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd) +[^4]: [{{< var validmind.developer >}}](/developer/validmind-library.qmd) [^5]: [Working with the model inventory](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models) \ No newline at end of file diff --git a/site/guide/model-validation/manage-validation-guidelines.qmd b/site/guide/model-validation/manage-validation-guidelines.qmd index 92fab34461..28f086da33 100644 --- a/site/guide/model-validation/manage-validation-guidelines.qmd +++ b/site/guide/model-validation/manage-validation-guidelines.qmd @@ -3,7 +3,7 @@ title: "Manage validation guidelines" date: last-modified aliases: - view-validation-guidelines.html - - ../view-validation-guidelines.html + - /guide/view-validation-guidelines.html --- Edit the guidelines for validation reports to align them with organization's requirements, and add the guidelines to your report templates to ensure that they are ready for use. diff --git a/site/guide/model-validation/preparing-validation-reports.qmd b/site/guide/model-validation/preparing-validation-reports.qmd index 1318246909..11d943fec0 100644 --- a/site/guide/model-validation/preparing-validation-reports.qmd +++ b/site/guide/model-validation/preparing-validation-reports.qmd @@ -14,7 +14,7 @@ listing: - ../model-documentation/collaborate-with-others.qmd - ../model-documentation/submit-for-approval.qmd aliases: - - ../preparing-validation-reports.html + - /guide/preparing-validation-reports.html --- Use the {{< var validmind.platform >}} to prepare detailed validation reports in adherence with your organization's guidelines. Link evidence and findings to your reports and assess compliance status at a granular level. 
diff --git a/site/guide/model-validation/review-model-documentation.qmd b/site/guide/model-validation/review-model-documentation.qmd index 3347b2e5dc..d2445f3c01 100644 --- a/site/guide/model-validation/review-model-documentation.qmd +++ b/site/guide/model-validation/review-model-documentation.qmd @@ -2,7 +2,7 @@ title: "Review model documentation" date: last-modified aliases: - - ../review-model-documentation.html + - /guide/review-model-documentation.html --- Use the {{< var validmind.platform >}} to review the model documentation provided by a model developer as part of the formal validation process. diff --git a/site/guide/model-validation/working-with-model-findings.qmd b/site/guide/model-validation/working-with-model-findings.qmd index 15a0bb27bf..197b404dc2 100644 --- a/site/guide/model-validation/working-with-model-findings.qmd +++ b/site/guide/model-validation/working-with-model-findings.qmd @@ -13,7 +13,7 @@ listing: - add-manage-model-findings.qmd aliases: - work-with-model-findings.html - - ../work-with-model-findings.html + - /guide/work-with-model-findings.html --- Use {{< var validmind.platform >}} to log thorough findings as you validate your models. From status and severity to proposed remediation plans and due dates, {{< var vm.product >}} allows you to oversee the minutiae to ensure organizational compliance. 
diff --git a/site/guide/model-workflows/customize-model-lifecycle-statuses.qmd b/site/guide/model-workflows/customize-model-lifecycle-statuses.qmd index 6d91dac4e6..806ce48dd5 100644 --- a/site/guide/model-workflows/customize-model-lifecycle-statuses.qmd +++ b/site/guide/model-workflows/customize-model-lifecycle-statuses.qmd @@ -3,7 +3,7 @@ title: "Customize model lifecycle statuses" date: last-modified aliases: - customize-resource-statuses.html - - ../customize-resource-statuses.html + - /guide/customize-resource-statuses.html --- Model lifecycle statuses are manipulated via workflow transitions and are used to track the progress of resources through your organization's processes. diff --git a/site/guide/model-workflows/set-up-model-workflows.qmd b/site/guide/model-workflows/set-up-model-workflows.qmd index bd7d4826a1..02e5e144cb 100644 --- a/site/guide/model-workflows/set-up-model-workflows.qmd +++ b/site/guide/model-workflows/set-up-model-workflows.qmd @@ -2,7 +2,7 @@ title: "Set up model workflows" date: last-modified aliases: - - ../set-up-model-workflows.html + - /guide/set-up-model-workflows.html --- diff --git a/site/guide/model-workflows/working-with-model-workflows.qmd b/site/guide/model-workflows/working-with-model-workflows.qmd index 7455792a19..17ab5d974c 100644 --- a/site/guide/model-workflows/working-with-model-workflows.qmd +++ b/site/guide/model-workflows/working-with-model-workflows.qmd @@ -12,7 +12,7 @@ listing: - customize-model-lifecycle-statuses.qmd - set-up-model-workflows.qmd aliases: - - ../working-with-model-workflows.html + - /guide/working-with-model-workflows.html --- Manage lifecycle processes within your {{< var validmind.platform >}} setup using workflows. Customize both your workflows and your lifecycle statuses to reflect the procedures within your organization. 
diff --git a/site/guide/reporting/working-with-analytics.qmd b/site/guide/reporting/working-with-analytics.qmd index 21a17a77bd..e84448cdcf 100644 --- a/site/guide/reporting/working-with-analytics.qmd +++ b/site/guide/reporting/working-with-analytics.qmd @@ -2,8 +2,8 @@ title: "Working with analytics" date: last-modified aliases: - - ../view-reports.html - - ../model-validation/view-reports.html + - /guide/view-reports.html + - /guide/model-validation/view-reports.html listing: - id: reports type: grid diff --git a/site/index.qmd b/site/index.qmd index 0a5f17a873..70354635fa 100644 --- a/site/index.qmd +++ b/site/index.qmd @@ -25,9 +25,9 @@ listing: sort: false fields: [title, description] contents: - - /developer/get-started-validmind-library.qmd - - /developer/model-documentation/supported-models.qmd - - /developer/model-documentation/documenting-models.qmd + - /developer/validmind-library.qmd + - /developer/supported-models.qmd + - /developer/samples-jupyter-notebooks.qmd - id: validator type: grid grid-columns: 1 diff --git a/site/internal/testing.qmd b/site/internal/testing.qmd index d51eefdffc..af58b3494f 100644 --- a/site/internal/testing.qmd +++ b/site/internal/testing.qmd @@ -6,7 +6,7 @@ search: false To use the testing landing page, in `_quarto.yml` uncomment out the internal testing sections in the `left` & `sidebar`. -When you're done with your test, please move the folder/files from the `internal` folder into `../internal/testing` (**outside** of `/site`) and comment out the navigation links again in `_quarto.yml`. :) +When you're done with your test, please move the folder/files from the `internal` folder into `~/internal/testing` (**outside** of `/site`) and comment out the navigation links again in `_quarto.yml`. :) ::: {.callout-important} Files created in this section must always be set to `search: false` to prevent them from showing up in the site search. 
diff --git a/site/notebooks.zip b/site/notebooks.zip index 06efc737df..3daf454cab 100644 Binary files a/site/notebooks.zip and b/site/notebooks.zip differ diff --git a/site/notebooks/README.md b/site/notebooks/README.md index 24a923b91f..d4b8b9dbc6 100644 --- a/site/notebooks/README.md +++ b/site/notebooks/README.md @@ -16,7 +16,7 @@ ValidMind enables organizations to identify, document, and manage model risks fo If this is your first time trying out ValidMind, you can make use of the following resources alongside our sample notebooks: - [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work -- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference +- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference ## Contributing code samples diff --git a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb index dbc49d2f2c..b019c7b457 100644 --- a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb +++ b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models.ipynb @@ -82,7 +82,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. 
There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", @@ -443,6 +443,9 @@ "metadata": {}, "outputs": [], "source": [ + "N = 10000\n", + "M = 100\n", + "\n", "# Parameters for synthetic data\n", "S0 = 100\n", "K = 100\n", diff --git a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb index 32f9cbce21..e251918470 100644 --- a/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb +++ b/site/notebooks/code_samples/capital_markets/quickstart_option_pricing_models_quantlib.ipynb @@ -120,7 +120,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_demo.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_demo.ipynb index 7b3bc0ec6b..5bb5985f4e 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_demo.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_demo.ipynb @@ -86,7 +86,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb index 3ee2b1e6bb..349cfd30c6 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_executive.ipynb @@ -37,7 +37,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb index 750ebc9672..2c91302c14 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_full_suite.ipynb @@ -37,7 +37,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb index 59d507bf2c..0d6f4e270e 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_with_bias.ipynb @@ -75,7 +75,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n" + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n" ] }, { diff --git a/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb b/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb index 26a983f10d..ca1bdb4e36 100644 --- a/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb +++ b/site/notebooks/code_samples/credit_risk/application_scorecard_with_ml.ipynb @@ -37,7 +37,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. 
There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb b/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb index bff8c13619..34ed27e575 100644 --- a/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb +++ b/site/notebooks/code_samples/custom_tests/implement_custom_tests.ipynb @@ -78,7 +78,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", @@ -567,28 +567,51 @@ "outputs": [], "source": [ "import requests\n", + "import random\n", "\n", "\n", "@vm.test(\"my_custom_tests.ExternalAPI\")\n", "def external_api():\n", - " \"\"\"This test calls an external API to get the current BTC price. It then creates\n", + " \"\"\"This test calls an external API to get a list of fake users. It then creates\n", " a table with the relevant data so it can be displayed in the documentation.\n", "\n", " The purpose of this test is to demonstrate how to call an external API and use the\n", " data in a test. A test like this could even be setup to run in a scheduled\n", " pipeline to keep your documentation in-sync with an external data source.\n", " \"\"\"\n", - " url = \"https://api.coindesk.com/v1/bpi/currentprice.json\"\n", + " url = \"https://jsonplaceholder.typicode.com/users\"\n", " response = requests.get(url)\n", " data = response.json()\n", "\n", " # extract the time and the current BTC price in USD\n", - " return [\n", - " {\n", - " \"Time\": data[\"time\"][\"updated\"],\n", - " \"Price (USD)\": data[\"bpi\"][\"USD\"][\"rate\"],\n", - " }\n", - " ]\n", + " return {\n", + " \"Model Owners/Stakeholders\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": random.choice([\"Owner\", \"Stakeholder\"]),\n", + " \"Email\": user[\"email\"],\n", + " \"Phone\": user[\"phone\"],\n", + " \"Slack Handle\": f\"@{user['name'].lower().replace(' ', '.')}\",\n", + " }\n", + " for user in data[:3]\n", + " ],\n", + " \"Model Developers\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Developer\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[3:7]\n", + " ],\n", + " \"Model Validators\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Validator\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[7:]\n", + " ],\n", + " }\n", "\n", "\n", "result = run_test(\"my_custom_tests.ExternalAPI\")\n", @@ -601,7 +624,7 @@ "source": [ "Again, you 
can add this to your documentation to see how it looks:\n", "\n", - "![screenshot showing BTC price metric](../../images/btc-price-custom-metric.png)" + "![screenshot showing BTC price metric](../../images/external-data-custom-test.png)" ] }, { @@ -944,7 +967,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb b/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb index 921105744d..d977817e7b 100644 --- a/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb +++ b/site/notebooks/code_samples/custom_tests/integrate_external_test_providers.ipynb @@ -114,7 +114,7 @@ "If this is your first time trying out ValidMind, we recommend going through the following resources first:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n", + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n", "\n", "\n", "\n", @@ -352,31 +352,54 @@ "outputs": [], "source": [ "import requests\n", + "import random\n", "\n", "\n", "@test(\"my_custom_tests.ExternalAPI\")\n", "def external_api():\n", - " \"\"\"This test calls an external API to get the current BTC price. It then creates\n", + " \"\"\"This test calls an external API to get a list of fake users. 
It then creates\n", " a table with the relevant data so it can be displayed in the documentation.\n", "\n", " The purpose of this test is to demonstrate how to call an external API and use the\n", " data in a test. A test like this could even be setup to run in a scheduled\n", " pipeline to keep your documentation in-sync with an external data source.\n", " \"\"\"\n", - " url = \"https://api.coindesk.com/v1/bpi/currentprice.json\"\n", + " url = \"https://jsonplaceholder.typicode.com/users\"\n", " response = requests.get(url)\n", " data = response.json()\n", "\n", " # extract the time and the current BTC price in USD\n", - " return [\n", - " {\n", - " \"Time\": data[\"time\"][\"updated\"],\n", - " \"Price (USD)\": data[\"bpi\"][\"USD\"][\"rate\"],\n", - " }\n", - " ]\n", - "\n", - "\n", - "external_api.save(tests_folder, imports=[\"import requests\"])" + " return {\n", + " \"Model Owners/Stakeholders\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": random.choice([\"Owner\", \"Stakeholder\"]),\n", + " \"Email\": user[\"email\"],\n", + " \"Phone\": user[\"phone\"],\n", + " \"Slack Handle\": f\"@{user['name'].lower().replace(' ', '.')}\",\n", + " }\n", + " for user in data[:3]\n", + " ],\n", + " \"Model Developers\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Developer\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[3:7]\n", + " ],\n", + " \"Model Validators\": [\n", + " {\n", + " \"Name\": user[\"name\"],\n", + " \"Role\": \"Validator\",\n", + " \"Email\": user[\"email\"],\n", + " }\n", + " for user in data[7:]\n", + " ],\n", + " }\n", + "\n", + "\n", + "external_api.save(tests_folder, imports=[\"import requests\", \"import random\"])" ] }, { @@ -904,7 +927,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb 
b/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb index e76caab882..5fb7b62210 100644 --- a/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb @@ -22,7 +22,7 @@ "If this is your first time trying out ValidMind, we recommend going through the following resources first:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ] }, { @@ -322,7 +322,7 @@ "\n", "What you can see now is a more easily consumable version of the prompt validation testing you just performed, along with other parts of your model documentation that still need to be completed.\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html).\n" + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html).\n" ] }, { diff --git a/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb index ef70b9ddd2..9ae50dbb9a 100644 --- a/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/foundation_models_summarization_demo.ipynb @@ -37,7
+37,7 @@ "If this is your first time trying out ValidMind, we recommend going through the following resources first:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ] }, { @@ -367,7 +367,7 @@ "\n", "What you can see now is a more easily consumable version of the prompt validation testing you just performed, along with other parts of your model documentation that still need to be completed.\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html).\n" + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html).\n" ] }, { diff --git a/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb index 0c5d996ad8..ee51ab20bf 100644 --- a/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/hugging_face_integration_demo.ipynb @@ -22,7 +22,7 @@ "If this is your first time trying out ValidMind, we recommend going through the following resources first:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind
Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ] }, { @@ -294,7 +294,7 @@ "\n", "What you can see now is a more easily consumable version of the prompt validation testing you just performed, along with other parts of your model documentation that still need to be completed.\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html).\n" + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html).\n" ] }, { diff --git a/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb index f4042e960a..c68e562d78 100644 --- a/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/hugging_face_summarization_demo.ipynb @@ -23,7 +23,7 @@ "If this is your first time trying out ValidMind, we recommend going through the following resources first:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ]
}, { @@ -326,7 +326,7 @@ "\n", "What you can see now is a more easily consumable version of the prompt validation testing you just performed, along with other parts of your model documentation that still need to be completed.\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html).\n" + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html).\n" ] }, { diff --git a/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb index 8fef07bd3f..e7ac596845 100644 --- a/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/llm_summarization_demo.ipynb @@ -45,7 +45,7 @@ "If this is your first time trying out ValidMind, you can make use of the following resources alongside this notebook:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ] }, { @@ -814,7 +814,7 @@ "\n", "What you can see now is a more easily consumable version of the prompt validation testing you just performed, along with other parts of your model documentation that still need to be completed.\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get
started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html)." + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html)." ] }, { diff --git a/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb index b32845f16c..0f88228e1e 100644 --- a/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/prompt_validation_demo.ipynb @@ -66,7 +66,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb b/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb index 1070f604d8..f6942033ef 100644 --- a/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb +++ b/site/notebooks/code_samples/nlp_and_llm/rag_documentation_demo.ipynb @@ -33,7 +33,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb b/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb index ab5d6d4bf6..e4e48884d3 100644 --- a/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb +++ b/site/notebooks/code_samples/ongoing_monitoring/application_scorecard_ongoing_monitoring.ipynb @@ -33,7 +33,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb b/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb index d489beb2da..156f3fb142 100644 --- a/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb +++ b/site/notebooks/code_samples/ongoing_monitoring/quickstart_customer_churn_ongoing_monitoring.ipynb @@ -74,7 +74,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb b/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb index e1e329e133..461d8c2896 100644 --- a/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb +++ b/site/notebooks/code_samples/regression/quickstart_regression_full_suite.ipynb @@ -29,7 +29,7 @@ "If this is your first time trying out ValidMind, you can make use of the following resources alongside this notebook:\n", "\n", "- [Get started](https://docs.validmind.ai/get-started/get-started.html) — The basics, including key concepts, and how our products work\n", - "- [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html) — The path for developers, more code samples, and our developer reference\n" + "- [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html) — The path for developers, more code samples, and our developer reference\n" ] }, { @@ -447,7 +447,7 @@ "\n", "What you can see now is a much more easily consumable version of the documentation, including the results of the tests you just performed, along with other parts of your model documentation that still need to be completed. 
There is a wealth of information that gets uploaded when you run the full test suite, so take a closer look around, especially at test results that might need attention (hint: some of the tests in **2.1 Data description** look like they need some attention).\n", "\n", - "If you want to learn more about where you are in the model documentation process, take a look at [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html).\n" + "If you want to learn more about where you are in the model documentation process, take a look at our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html).\n" ] }, { diff --git a/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb b/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb index 174b056860..edd3ca9b57 100644 --- a/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb +++ b/site/notebooks/code_samples/time_series/quickstart_time_series_full_suite.ipynb @@ -77,7 +77,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb b/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb index 4445327500..8873b85243 100644 --- a/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb +++ b/site/notebooks/code_samples/time_series/quickstart_time_series_high_code.ipynb @@ -77,7 +77,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/configure_dataset_features.ipynb b/site/notebooks/how_to/configure_dataset_features.ipynb index 43be9c4d20..9bf927740b 100644 --- a/site/notebooks/how_to/configure_dataset_features.ipynb +++ b/site/notebooks/how_to/configure_dataset_features.ipynb @@ -67,7 +67,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb b/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb index 2a46f0f203..8dc4ab10df 100644 --- a/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb +++ b/site/notebooks/how_to/document_multiple_results_for_the_same_test.ipynb @@ -82,7 +82,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/explore_test_suites.ipynb b/site/notebooks/how_to/explore_test_suites.ipynb index 05bf407036..7cb5e2e49f 100644 --- a/site/notebooks/how_to/explore_test_suites.ipynb +++ b/site/notebooks/how_to/explore_test_suites.ipynb @@ -63,7 +63,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/load_datasets_predictions.ipynb b/site/notebooks/how_to/load_datasets_predictions.ipynb index 3d73530d52..81c4e0e981 100644 --- a/site/notebooks/how_to/load_datasets_predictions.ipynb +++ b/site/notebooks/how_to/load_datasets_predictions.ipynb @@ -79,7 +79,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/log_metrics_over_time.ipynb b/site/notebooks/how_to/log_metrics_over_time.ipynb index 9cef4c5402..d551d58ff1 100644 --- a/site/notebooks/how_to/log_metrics_over_time.ipynb +++ b/site/notebooks/how_to/log_metrics_over_time.ipynb @@ -78,7 +78,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_documentation_sections.ipynb b/site/notebooks/how_to/run_documentation_sections.ipynb index baca51bfde..b7b43e3793 100644 --- a/site/notebooks/how_to/run_documentation_sections.ipynb +++ b/site/notebooks/how_to/run_documentation_sections.ipynb @@ -73,7 +73,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_documentation_tests_with_config.ipynb b/site/notebooks/how_to/run_documentation_tests_with_config.ipynb index 82c969c47b..0eea64a465 100644 --- a/site/notebooks/how_to/run_documentation_tests_with_config.ipynb +++ b/site/notebooks/how_to/run_documentation_tests_with_config.ipynb @@ -77,7 +77,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb b/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb index 8ed766f900..dfbc4a0de7 100644 --- a/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb +++ b/site/notebooks/how_to/run_tests/1_run_dataset_based_tests.ipynb @@ -72,7 +72,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb b/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb index 2564d8ef46..9370fc98be 100644 --- a/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb +++ b/site/notebooks/how_to/run_tests/2_run_comparison_tests.ipynb @@ -79,7 +79,7 @@ "\n", "\n", "### New to ValidMind?\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb b/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb index 1d9cdb02b5..9fde462201 100644 --- a/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb +++ b/site/notebooks/how_to/run_tests_that_require_multiple_datasets.ipynb @@ -75,7 +75,7 @@ "\n", "### New to ValidMind?\n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/run_unit_metrics.ipynb b/site/notebooks/how_to/run_unit_metrics.ipynb index 77c92066d9..c6469e7ac9 100644 --- a/site/notebooks/how_to/run_unit_metrics.ipynb +++ b/site/notebooks/how_to/run_unit_metrics.ipynb @@ -104,7 +104,7 @@ "\n", "### New to ValidMind? \n", "\n", - "If you haven't already seen our [Get started with the ValidMind Library](https://docs.validmind.ai/developer/get-started-validmind-library.html), we recommend you explore the available resources for developers at some point. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", "\n", "
For access to all features available in this notebook, create a free ValidMind account.\n", "

\n", diff --git a/site/notebooks/how_to/understand_utilize_rawdata.ipynb b/site/notebooks/how_to/understand_utilize_rawdata.ipynb index 7354eae858..93d664a7c6 100644 --- a/site/notebooks/how_to/understand_utilize_rawdata.ipynb +++ b/site/notebooks/how_to/understand_utilize_rawdata.ipynb @@ -31,7 +31,7 @@ " - [Pearson Correlation Matrix](#toc2_2_) \n", " - [Precision-Recall Curve](#toc2_3_) \n", " - [Using `RawData` in custom tests](#toc2_4_) \n", - "\n", + " - [Using `RawData` in comparison tests](#toc2_5_) \n", ":::\n", "\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "d78e3887", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Introduction\n", + "\n", + "Model development aims to produce a fit-for-purpose *champion model* by conducting thorough testing and analysis, supporting the capabilities of the model with evidence in the form of documentation and test results. Model documentation should be clear and comprehensive, ideally following a structure or template covering all aspects of compliance with model risk regulation.\n", + "\n", + "A *binary classification model* is a type of predictive model used in churn analysis to identify customers who are likely to leave a service or subscription by analyzing various behavioral, transactional, and demographic factors.\n", + "\n", + "- This model helps businesses take proactive measures to retain at-risk customers by offering personalized incentives, improving customer service, or adjusting pricing strategies.\n", + "- Effective validation of a churn prediction model ensures that businesses can accurately identify potential churners, optimize retention efforts, and enhance overall customer satisfaction while minimizing revenue loss." + ] + }, + { + "cell_type": "markdown", + "id": "f40a5e0a", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. 
\n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators." + ] + }, + { + "cell_type": "markdown", + "id": "12af6ba2", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html)." + ] + }, + { + "cell_type": "markdown", + "id": "5f9cc87c", + "metadata": {}, + "source": [ + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models, find code samples, or read our developer reference.\n", + "\n", + "
For access to all features available in this notebook, create a free ValidMind account.\n", + "

\n", + "Signing up is FREE — Register with ValidMind
" + ] + }, + { + "cell_type": "markdown", + "id": "31c5cde0", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Metrics**: A subset of tests that do not have thresholds. In the context of this notebook, metrics and tests can be thought of as interchangeable concepts.\n", + "\n", + "**Custom metrics**: Custom metrics are functions that you define to evaluate your model or dataset. These functions can be registered with the ValidMind Library to be used in the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom metrics can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "id": "1c06378f", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Initializing the ValidMind Library\n", + "\n", + "The ValidMind Library provides a rich collection of documentation tools and test suites, from documenting descriptions of datasets to validation and testing of models using a variety of open-source testing frameworks." + ] + }, + { + "cell_type": "markdown", + "id": "00f99235", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "
Recommended Python versions\n", + "

\n", + "Python 3.8 <= x <= 3.11
\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8883bbc3", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "id": "780b6b39", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook." + ] + }, + { + "cell_type": "markdown", + "id": "ec5bdcec", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Continue**. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + " For example, to register a model for use with this notebook, select:" + ] + }, + { + "cell_type": "markdown", + "id": "d00f6f07", + "metadata": {}, + "source": [ + " - Documentation template: `Binary classification`\n", + " - Use case: `Marketing/Sales - Attrition/Churn Management`" + ] + }, + { + "cell_type": "markdown", + "id": "install-credentials-50e67128-2eb5-470a-aeaf-1c692fd3f847", + "metadata": {}, + "source": [ + " You can fill in other options according to your preference.\n", + " \n", + "4. 
Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f22e91d", + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "c3186121", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Getting to know ValidMind" + ] + }, + { + "cell_type": "markdown", + "id": "3b4c604d", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Preview the documentation template\n", + "\n", + "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32ab4cac", + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "id": "6f5341af", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Explore available tests\n", + "\n", + "Next, let's explore the list of all available tests in the ValidMind Library with [the `vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) — we'll learn how to run tests shortly. \n", + "\n", + "You can see that the documentation template for this model has references to some of the **test `ID`s used to run tests listed below:**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "acf76128", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests()" + ] + }, + { + "cell_type": "markdown", + "id": "4327631b", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "95bede03", + "metadata": {}, + "outputs": [], + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "id": "540efef8", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "6a7bf101", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "207875f2", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this first notebook, you learned how to:\n", + "\n", + "- [ ] Register a model within the ValidMind Platform\n", + "- [ ] Install and initialize the ValidMind Library\n", + "- [ ] Preview the documentation template for your model\n", + "- [ ] Explore the available tests offered by the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "id": "29781eb4", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "id": "4eb45e03", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Start the model development process\n", + "\n", + "Now that the ValidMind Library is connected to your model in the ValidMind Library with the correct template applied, we can go ahead and start the model development process: **[102 Start the model development process](102-start_development_process.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", 
+ "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/site/notebooks/tutorials/model_development/102-start_development_process.ipynb b/site/notebooks/tutorials/model_development/102-start_development_process.ipynb new file mode 100644 index 0000000000..b64c31426a --- /dev/null +++ b/site/notebooks/tutorials/model_development/102-start_development_process.ipynb @@ -0,0 +1,959 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development — 102 Start the model development process\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our series of four introductory notebooks. In this second notebook, you'll run tests and investigate results, then add the results or evidence to your documentation.\n", + "\n", + "You'll become familiar with the individual tests available in ValidMind, as well as how to run them and change parameters as necessary. Using ValidMind's repository of individual tests as building blocks helps you ensure that a model is being built appropriately. \n", + "\n", + "**For a full list of out-of-the-box tests,** refer to our [Test descriptions](https://docs.validmind.ai/developer/model-testing/test-descriptions.html) or try the interactive [Test sandbox](https://docs.validmind.ai/developer/model-testing/test-sandbox.html)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Identify qualitative tests](#toc2_3_) \n", + " - [Initialize the ValidMind datasets](#toc2_4_) \n", + "- [Running tests](#toc3_) \n", + " - [Run tabular data tests](#toc3_1_) \n", + " - [Utilize test output](#toc3_2_) \n", + "- [Documenting results](#toc4_) \n", + " - [Run and log multiple tests](#toc4_1_) \n", + " - [Run and log an individual test](#toc4_2_) \n", + " - [Add individual test results to model documentation](#toc4_2_1_) \n", + "- [Model testing](#toc5_) \n", + " - [Train simple logistic regression model](#toc5_1_) \n", + " - [Initialize model evaluation objects](#toc5_2_) \n", + " - [Assign predictions](#toc5_3_) \n", + " - [Run the model evaluation tests](#toc5_4_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Integrate custom tests](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to log test results or evidence to your model documentation with this notebook, you'll need to first have:\n", + "\n", + "- [ ] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [ ] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first notebook in this series: 101 Set up ValidMind
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "First, let's connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Then, let's import the public [Bank Customer Churn Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle. 
\n", + "\n", + "In our below example, note that: \n", + "\n", + "- The target column, `Exited` has a value of `1` when a customer has churned and `0` otherwise.\n", + "- The ValidMind Library provides a wrapper to automatically load the dataset as a Pandas DataFrame object." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()\n", + "raw_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Identify qualitative tests\n", + "\n", + "Next, let's say we want to do some data quality assessments by running a few individual tests.\n", + "\n", + "Use the [`vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) introduced by the first notebook in this series in combination with [`vm.tests.list_tags()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tags) and [`vm.tests.list_tasks()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks) to find which prebuilt tests are relevant for data quality assessment:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the list of available tags\n", + "sorted(vm.tests.list_tags())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the list of available task types\n", + "sorted(vm.tests.list_tasks())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can pass `tags` and `tasks` as parameters to the `vm.tests.list_tests()` function to filter the tests based on the tags and task 
types.\n", + "\n", + "For example, to find tests related to tabular data quality for classification models, you can call `list_tests()` like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests(task=\"classification\", tags=[\"tabular_data\", \"data_quality\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind datasets\n", + "\n", + "With the individual tests we want to run identified, the next step is to connect your data with a ValidMind `Dataset` object. **This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind,** but you only need to do it once per dataset.\n", + "\n", + "Initialize a ValidMind dataset object using the [`init_dataset` function](https://docs.validmind.ai/validmind/validmind.html#init_dataset) from the ValidMind (`vm`) module. For this example, we'll pass in the following arguments:\n", + "\n", + "- **`dataset`** — The raw dataset that you want to provide as input to tests.\n", + "- **`input_id`** — A unique identifier that allows tracking what inputs are used when running each individual test.\n", + "- **`target_column`** — A required argument if tests require access to true values. 
This is the name of the target column in the dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# vm_raw_dataset is now a VMDataset object that you can pass to any ValidMind test\n", + "vm_raw_dataset = vm.init_dataset(\n", + " dataset=raw_df,\n", + " input_id=\"raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Running tests\n", + "\n", + "Now that we know how to initialize a ValidMind `dataset` object, we're ready to run some tests!\n", + "\n", + "You run individual tests by calling [the `run_test` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test) provided by the `validmind.tests` module. For the examples below, we'll pass in the following arguments:\n", + "\n", + "- **`test_id`** — The ID of the test to run, as seen in the `ID` column when you run `list_tests`. \n", + "- **`params`** — A dictionary of parameters for the test. These will override any `default_params` set in the test definition. \n", + "\n", + "
Want to learn more about ValidMind tests?\n", + "

\n", + "Refer to our notebook that includes code samples and usage of key functions: Explore tests
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run tabular data tests\n", + "\n", + "The inputs expected by a test can also be found in the test definition — let's take [`validmind.data_validation.DescriptiveStatistics`](https://docs.validmind.ai/tests/data_validation/DescriptiveStatistics.html) as an example.\n", + "\n", + "Note that the output of the [`describe_test()` function](https://docs.validmind.ai/validmind/validmind/tests.html#describe_test) below shows that this test expects a `dataset` as input:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.describe_test(\"validmind.data_validation.DescriptiveStatistics\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's run a few tests to assess the quality of the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.DescriptiveStatistics\",\n", + " inputs={\"dataset\": vm_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result2 = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.ClassImbalance\",\n", + " inputs={\"dataset\": vm_raw_dataset},\n", + " params={\"min_percent_threshold\": 30},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output above shows that [the class imbalance test](https://docs.validmind.ai/tests/data_validation/ClassImbalance.html) did not pass according to the value we set for `min_percent_threshold`.\n", + "\n", + "To address this issue, we'll re-run the test on some processed data. 
In this case let's apply a very simple rebalancing technique to the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this new balanced dataset, you can re-run the individual test to see if it now passes the class imbalance test requirement.\n", + "\n", + "As this is technically a different dataset, **remember to first initialize a new ValidMind `Dataset` object** to pass in as input as required by `run_test()`:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pass the initialized `balanced_raw_dataset` as input into the test run\n", + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.ClassImbalance\",\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + " params={\"min_percent_threshold\": 30},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Utilize test output\n", + "\n", + 
"You can utilize the output from a ValidMind test for further use, for example, if you want to remove highly correlated features. Below we demonstrate how to retrieve the list of features with the highest correlation coefficients and use them to reduce the final list of features for modeling.\n", + "\n", + "First, we'll run [`validmind.data_validation.HighPearsonCorrelation`](https://docs.validmind.ai/tests/data_validation/HighPearsonCorrelation.html) with the `balanced_raw_dataset` we initialized previously as input as is for comparison with later runs:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`corr_result` is an object of type `TestResult`. 
We can inspect the result object to see what the test has produced:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "print(type(corr_result))\n", + "print(\"Result ID: \", corr_result.result_id)\n", + "print(\"Params: \", corr_result.params)\n", + "print(\"Passed: \", corr_result.passed)\n", + "print(\"Tables: \", corr_result.tables)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's remove the highly correlated features and create a new VM `dataset` object.\n", + "\n", + "We'll begin by checking out the table in the result and extracting a list of features that failed the test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, extract the feature names from the list of strings (example: `(Age, Exited)` > `Age`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, it's time to re-initialize the dataset with the highly correlated features removed.\n", + "\n", + "**Note the use of a different `input_id`.** This allows tracking the inputs used when running each individual test." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Remove the highly correlated features from the dataset\n", + "balanced_raw_no_age_df = balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Re-running the test with the reduced feature set should pass the test:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also plot the correlation matrix to visualize the new correlation between features:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.PearsonCorrelationMatrix\",\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Documenting results\n", + "\n", + "We've now done some analysis on two different datasets, and we should be able to document why certain things were done to the raw data with testing to support it.\n", + "\n", + "Every test result returned by the `run_test()` function has a [`.log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestResult.log) that can be used to send the test results to the ValidMind 
Platform:\n", + "\n", + "- When using `run_documentation_tests()`, documentation sections will be automatically populated with the results of all tests registered in the documentation template.\n", + "- When logging individual test results to the platform, you'll need to manually add those results to the desired section of the model documentation.\n", + "\n", + "To demonstrate how to add test results to your model documentation, we'll populate the entire **Data Preparation** section of the documentation using the clean `vm_raw_dataset_preprocessed` dataset as input, and then document an additional individual result for the highly correlated dataset `vm_balanced_raw_dataset`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Run and log multiple tests\n", + "\n", + "[`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests) allows you to run multiple tests at once and automatically log the results to your documentation. Below, we'll run the tests using the previously initialized `vm_raw_dataset_preprocessed` as input — this will populate the entire **Data Preparation** section for every test that is part of the documentation template.\n", + "\n", + "For this example, we'll pass in the following arguments:\n", + "\n", + "- **`inputs`:** Any inputs to be passed to the tests.\n", + "- **`config`:** A dictionary `:` that allows configuring each test individually. Each test config requires the following:\n", + " - **`params`:** Individual test parameters.\n", + " - **`inputs`:** Individual test inputs. 
This overrides any inputs passed from the `run_documentation_tests()` function.\n", + "\n", + "When including explicit configuration for individual tests, you'll need to specify the `inputs` even if they mirror what is included in your global configuration.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Individual test config with inputs specified\n", + "test_config = {\n", + " \"validmind.data_validation.ClassImbalance\": {\n", + " \"params\": {\"min_percent_threshold\": 30},\n", + " \"inputs\": {\"dataset\": vm_raw_dataset_preprocessed},\n", + " },\n", + " \"validmind.data_validation.HighPearsonCorrelation\": {\n", + " \"params\": {\"max_threshold\": 0.3},\n", + " \"inputs\": {\"dataset\": vm_raw_dataset_preprocessed},\n", + " },\n", + "}\n", + "\n", + "# Global test config\n", + "tests_suite = vm.run_documentation_tests(\n", + " inputs={\n", + " \"dataset\": vm_raw_dataset_preprocessed,\n", + " },\n", + " config=test_config,\n", + " section=[\"data_preparation\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run and log an individual test\n", + "\n", + "Next, we'll use the previously initialized `vm_balanced_raw_dataset` (that had a highly correlated `Age` column) as input to run an individual test, then log the result to the ValidMind Platform.\n", + "\n", + "When running individual tests, **you can use a custom `result_id` to tag the individual result with a unique identifier:** \n", + "\n", + "- This `result_id` can be appended to `test_id` with a `:` separator.\n", + "- The `balanced_raw_dataset` result identifier will correspond to the `balanced_raw_dataset` input, the dataset that still has the `Age` column." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation:balanced_raw_dataset\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Add individual test results to model documentation\n", + "\n", + "With the test results logged, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation ([Need more help?](https://docs.validmind.ai/developer/model-documentation/work-with-test-results.html)):\n", + "\n", + "1. From the **Inventory** in the ValidMind Platform, go to the model you connected to earlier.\n", + "\n", + "2. In the left sidebar that appears for your model, click **Documentation**.\n", + "\n", + "3. Locate the Data Preparation section and click on **2.3 Correlations and Interactions** to expand that section.\n", + "\n", + "4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block.\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "5. Click **+** and then select **Test-Driven Block**:\n", + "\n", + " - In the search bar, type in `HighPearsonCorrelation`.\n", + " - Select `HighPearsonCorrelation:balanced_raw_dataset` as the test.\n", + "\n", + " A preview of the test gets shown:\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "6. Finally, click **Insert 1 Test Result to Document** to add the test result to the documentation.\n", + "\n", + " Confirm that the individual results for the high correlation test has been correctly inserted into section **2.3 Correlations and Interactions** of the documentation.\n", + "\n", + "7. Finalize the documentation by editing the test result's description block to explain the changes you made to the raw data and the reasons behind them as shown in the screenshot below:\n", + "\n", + " \"Screenshot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Model testing\n", + "\n", + "So far, we've focused on the data assessment and pre-processing that usually occurs prior to any models being built. Now, let's instead assume we have already built a model and we want to incorporate some model results into our documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train simple logistic regression model\n", + "\n", + "Using ValidMind tests, we'll train a simple logistic regression model on our dataset and evaluate its performance by using the `LogisticRegression` class from the `sklearn.linear_model`.\n", + "\n", + "To start, let's grab the first few rows from the `balanced_raw_no_age_df` dataset with the highly correlated features removed we initialized earlier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before training the model, we need to encode the categorical features in the dataset:\n", + "\n", + "- Use the `OneHotEncoder` class from the `sklearn.preprocessing` module to encode the categorical features.\n", + "- The categorical features in the dataset are `Geography` and `Gender`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using `GridSearchCV`, we'll find the best-performing hyperparameters or settings and save them:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "# Split the input and target variables\n", + "X = balanced_raw_no_age_df.drop(\"Exited\", axis=1)\n", + "y = balanced_raw_no_age_df[\"Exited\"]\n", + "X_train, X_test, y_train, y_test = train_test_split(\n", + " X,\n", + " y,\n", + " test_size=0.2,\n", + " random_state=42,\n", + ")\n", + "\n", + "# Logistic Regression grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", \"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " \"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize model evaluation objects\n", + "\n", + "The last step for evaluating the model's performance is to initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset.\n", + "\n", + "Use the `init_dataset` and [`init_model`](https://docs.validmind.ai/validmind/validmind.html#init_model) 
functions to initialize these objects:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_df = X_train\n", + "train_df[\"Exited\"] = y_train\n", + "test_df = X_test\n", + "test_df[\"Exited\"] = y_test\n", + "\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "# Register the model\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign predictions\n", + "\n", + "Once the model has been registered you can assign model predictions to the training and test datasets. The [`assign_predictions()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#VMDataset.assign_predictions) from the `Dataset` object can link existing predictions to any number of models.\n", + "\n", + "If no prediction values are passed, the method will compute predictions automatically:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run the model evaluation tests\n", + "\n", + "In this next example, we'll focus on running the tests within the Model Development section of the model documentation. 
Only tests associated with this section will be executed, and the corresponding results will be updated in the model documentation.\n", + "\n", + "- Note the additional config that is passed to `run_documentation_tests()` — this allows you to override `inputs` or `params` in certain tests.\n", + "- In our case, we want to explicitly use the `vm_train_ds` for the [`validmind.model_validation.sklearn.ClassifierPerformance:in_sample` test](https://docs.validmind.ai/tests/model_validation/sklearn/ClassifierPerformance.html), since it's supposed to run on the training dataset and not the test dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_config = {\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\n", + " \"dataset\": vm_train_ds,\n", + " \"model\": vm_model,\n", + " },\n", + " }\n", + "}\n", + "results = vm.run_documentation_tests(\n", + " section=[\"model_development\"],\n", + " inputs={\n", + " \"dataset\": vm_test_ds, # Any test that requires a single dataset will use vm_test_ds\n", + " \"model\": vm_model,\n", + " \"datasets\": (\n", + " vm_train_ds,\n", + " vm_test_ds,\n", + " ), # Any test that requires multiple datasets will use vm_train_ds and vm_test_ds\n", + " },\n", + " config=test_config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this second notebook, you learned how to:\n", + "\n", + "- [ ] Import a sample dataset\n", + "- [ ] Identify which tests you might want to run with ValidMind\n", + "- [ ] Initialize ValidMind datasets\n", + "- [ ] Run individual tests\n", + "- [ ] Utilize the output from tests you've run\n", + "- [ ] Log test results from sets of or individual tests as evidence to the ValidMind Platform\n", + "- [ ] Add supplementary individual test results to your documentation\n", + "- [ ] Assign model predictions to 
your ValidMind datasets\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Integrate custom tests\n", + "\n", + "Now that you're familiar with the basics of using the ValidMind Library to run and log tests to provide evidence for your model documentation, let's learn how to incorporate your own custom tests into ValidMind: **[103 Integrate custom tests](103-integrate_custom_tests.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/tutorials/model_development/103-integrate_custom_tests.ipynb b/site/notebooks/tutorials/model_development/103-integrate_custom_tests.ipynb new file mode 100644 index 0000000000..bcde58c62c --- /dev/null +++ b/site/notebooks/tutorials/model_development/103-integrate_custom_tests.ipynb @@ -0,0 +1,986 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development — 103 Integrate custom tests\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our series of four introductory notebooks. In this third notebook, supplement ValidMind tests with your own and include them as additional evidence in your documentation.\n", + "\n", + "This notebook assumes that you already have a repository of custom made tests considered critical to include in your documentation. 
A custom test is any function that takes a set of inputs and parameters as arguments and returns one or more outputs:\n", + "\n", + "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", + "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", + "\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb) notebook." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Remove highly correlated features](#toc2_2_1_) \n", + " - [Train the model](#toc2_3_) \n", + " - [Initialize the ValidMind objects](#toc2_3_1_) \n", + " - [Assign predictions](#toc2_3_2_) \n", + "- [Implementing a custom inline test](#toc3_) \n", + " - [Create a confusion matrix plot](#toc3_1_) \n", + " - [Add parameters to custom tests](#toc3_2_) \n", + " - [Pass parameters to custom tests](#toc3_3_) \n", + " - [Log the confusion matrix results](#toc3_4_) \n", + "- [Using external test providers](#toc4_) \n", + " - [Create custom tests folder](#toc4_1_) \n", + " - [Save an inline test](#toc4_2_) \n", + " - [Register a local test provider](#toc4_3_) \n", + " - [Initialize a local test provider](#toc4_3_1_) \n", + " - [Run test provider tests](#toc4_3_2_) \n", + "- [Add test results to documentation](#toc5_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Finalize testing and 
documentation](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to log test results or evidence to your model documentation with this notebook, you'll need to first have:\n", + "\n", + "- [ ] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [ ] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "- [ ] Learned how to import and initialize datasets for use with ValidMind\n", + "- [ ] Understood the basics of how to run and log tests with ValidMind\n", + "- [ ] Inserted a test-driven block for the results of your `HighPearsonCorrelation:balanced_raw_dataset` test into your model's documentation\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first two notebooks in this series:\n", + "\n", + "
    \n", + "
  1. 101 Set up ValidMind
  2. \n", + "
  3. 102 Start the model development process
  4. \n", + "
\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up\n", + "\n", + "This section should be quite familiar to you — as we performed the same actions in the previous notebook, **[102 Start the model development process](102-start_development_process.ipynb)**." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "As usual, let's first connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Next, we'll import the same public [Bank Customer Churn 
Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle we used in the last notebook so that we have something to work with:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll apply a simple rebalancing technique to the dataset before continuing:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Remove highly correlated features\n", + "\n", + "Let's also quickly remove highly correlated features from the dataset using the output from a ValidMind test.\n", + "\n", + "As you learned previously, before we can run tests you'll need to initialize a ValidMind dataset object:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = 
vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our balanced dataset initialized, we can then run our test and utilize the output to help us identify the features we want to remove:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run HighPearsonCorrelation test with our balanced dataset as input and return a result object\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# From result object, extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract feature names from the list of strings\n", + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then re-initialize the dataset with a different `input_id` and the highly correlated features removed and re-run the test for confirmation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Remove the 
highly correlated features from the dataset\n", + "balanced_raw_no_age_df = balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Re-run the test with the reduced feature set\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train the model\n", + "\n", + "We'll then use ValidMind tests to train a simple logistic regression model on our prepared dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First encode the categorical features in our dataset with the highly correlated features removed\n", + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "# Split the input and target variables\n", + "X = balanced_raw_no_age_df.drop(\"Exited\", axis=1)\n", + "y = balanced_raw_no_age_df[\"Exited\"]\n", + "X_train, X_test, y_train, y_test = train_test_split(\n", + " X,\n", + " y,\n", + " test_size=0.2,\n", + " random_state=42,\n", + ")\n", + "\n", + "# Logistic Regression grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", 
\"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " \"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind objects\n", + "\n", + "Let's initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_df = X_train\n", + "train_df[\"Exited\"] = y_train\n", + "test_df = X_test\n", + "test_df[\"Exited\"] = y_test\n", + "\n", + "# Initialize the datasets into their own dataset objects\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "# Initialize a model object\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Assign predictions\n", + "\n", + "Once the model is registered, we'll assign predictions to the training and test datasets:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Implementing a custom inline 
test\n", + "\n", + "With the set up out of the way, let's implement a custom *inline test* that calculates the confusion matrix for a binary classification model.\n", + "\n", + "- An inline test refers to a test written and executed within the same environment as the code being tested — in this case, right in this Jupyter Notebook — without requiring a separate test file or framework.\n", + "- You'll note that the custom test function is just a regular Python function that can include and require any Python library as you see fit." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Create a confusion matrix plot\n", + "\n", + "Let's first create a confusion matrix plot using the `confusion_matrix` function from the `sklearn.metrics` module:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "\n", + "# Get the predicted classes\n", + "y_pred = log_reg.predict(vm_test_ds.x)\n", + "\n", + "confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n", + "\n", + "cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + ")\n", + "cm_display.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, create a [`@vm.test` wrapper](https://docs.validmind.ai/validmind/validmind.html#test) that will allow you to create a reusable test. **Note the following changes in the code below:**\n", + "\n", + "- The function `confusion_matrix` takes two arguments `dataset` and `model`. 
This is a `VMDataset` and `VMModel` object respectively.\n", + " - `VMDataset` objects allow you to access the dataset's true (target) values by accessing the `.y` attribute.\n", + " - `VMDataset` objects allow you to access the predictions for a given model by accessing the `.y_pred()` method.\n", + "- The function docstring provides a description of what the test does. This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", + "- The function body calculates the confusion matrix using the `sklearn.metrics.confusion_matrix` function as we just did above.\n", + "- The function then returns the `ConfusionMatrixDisplay.figure_` object — this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", + "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can now run the newly created custom test on both the training and test datasets using the [`run_test()` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Training dataset\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:training_dataset\",\n", + " 
inputs={\"model\": vm_model, \"dataset\": vm_train_ds},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Add parameters to custom tests\n", + "\n", + "Custom tests can take parameters just like any other function. To demonstrate, let's modify the `confusion_matrix` function to take an additional parameter `normalize` that will allow you to normalize the confusion matrix:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model, normalize=False):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " if normalize:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred, normalize=\"all\")\n", + " else:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + 
"\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Pass parameters to custom tests\n", + "\n", + "You can pass parameters to custom tests by providing a dictionary of parameters to the `run_test()` function.\n", + "\n", + "- The parameters will override any default parameters set in the custom test definition. Note that `dataset` and `model` are still passed as `inputs`.\n", + "- Since these are `VMDataset` or `VMModel` inputs, they have a special meaning.\n", + "- When declaring a `dataset`, `model`, `datasets` or `models` argument in a custom test function, the ValidMind Library will expect these get passed as `inputs` to `run_test()` or `run_documentation_tests()`.\n", + "\n", + "Re-running the confusion matrix with `normalize=True` looks like this:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset with normalize=True\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Log the confusion matrix results\n", + "\n", + "As we learned in **[102 Start the model development process](102-start_development_process.ipynb)** under **Documenting results** > **Run and log an individual tests**, you can log any result to the ValidMind Platform with the [`.log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestResult.log) of the result object, allowing you to then add the 
result to the documentation.\n", + "\n", + "You can now do the same for the confusion matrix results:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Using external test providers\n", + "\n", + "Creating inline custom tests with a function is a great way to customize your model documentation. However, sometimes you may want to reuse the same set of tests across multiple models and share them with others in your organization. In this case, you can create an external custom *test provider* that will allow you to load custom tests from a local folder or a Git repository.\n", + "\n", + "In this section you will learn how to declare a local filesystem test provider that allows loading tests from a local folder following these high level steps:\n", + "\n", + "1. Create a folder of custom tests from existing inline tests (tests that exist in your active Jupyter Notebook)\n", + "2. Save an inline test to a file\n", + "3. Define and register a [`LocalTestProvider`](https://docs.validmind.ai/validmind/validmind/tests.html#LocalTestProvider) that points to that folder\n", + "4. Run test provider tests\n", + "5. 
Add the test results to your documentation\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Create custom tests folder\n", + "\n", + "Let's start by creating a new folder that will contain reusable custom tests from your existing inline tests.\n", + "\n", + "The following code snippet will create a new `my_tests` directory in the current working directory if it doesn't exist:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "tests_folder = \"my_tests\"\n", + "\n", + "import os\n", + "\n", + "# create tests folder\n", + "os.makedirs(tests_folder, exist_ok=True)\n", + "\n", + "# remove existing tests\n", + "for f in os.listdir(tests_folder):\n", + " # remove files and pycache\n", + " if f.endswith(\".py\") or f == \"__pycache__\":\n", + " os.system(f\"rm -rf {tests_folder}/{f}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After running the command above, confirm that the new `my_tests` directory was created successfully:\n", + "\n", + "\"Screenshot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Save an inline test\n", + "\n", + "The `@vm.test` decorator we used in **Implementing a custom inline test** above to register one-off custom tests also includes a convenience method on the function object that allows you to simply call `.save()` to save the test to a Python file at a specified path.\n", + "\n", + "While `save()` will get you started by creating the file and saving the function code with the correct name, it won't automatically include any imports, or other functions or variables, outside of the functions that are needed for the test to run. 
To solve this, pass in an optional `imports` argument ensuring necessary imports are added to the file.\n", + "\n", + "The `confusion_matrix` test requires the following additional imports:\n", + "\n", + "```python\n", + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "```\n", + "\n", + "Let's pass these imports to the `save()` method to ensure they are included in the file with the following command:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "confusion_matrix.save(\n", + " # Save it to the custom tests folder we created\n", + " tests_folder,\n", + " imports=[\"import matplotlib.pyplot as plt\", \"from sklearn import metrics\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- [ ] Confirm that the `save()` method saved the `confusion_matrix` function to a file named `ConfusionMatrix.py` in the `my_tests` folder.\n", + "- [ ] Note that the new file provides some context on the origin of the test, which is useful for traceability:\n", + "\n", + " ```\n", + " # Saved from __main__.confusion_matrix\n", + " # Original Test ID: my_custom_tests.ConfusionMatrix\n", + " # New Test ID: .ConfusionMatrix\n", + " ```\n", + "\n", + "- [ ] Additionally, the new test function has been stripped off its decorator, as it now resides in a file that will be loaded by the test provider:\n", + "\n", + " ```python\n", + " def ConfusionMatrix(dataset, model, normalize=False):\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Register a local test provider\n", + "\n", + "Now that your `my_tests` folder has a sample custom test, let's initialize a test provider that will tell the ValidMind Library where to find your custom tests:\n", + "\n", + "- ValidMind offers out-of-the-box test providers for local tests (tests in a folder) or a Github provider for tests in a Github 
repository.\n", + "- You can also create your own test provider by creating a class that has a [`load_test` method](https://docs.validmind.ai/validmind/validmind/tests.html#TestProvider.load_test) that takes a test ID and returns the test function matching that ID.\n", + "\n", + "
Want to learn more about test providers?\n", + "

\n", + "An extended introduction to test providers can be found in: Integrate external test providers
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize a local test provider\n", + "\n", + "For most use cases, using a `LocalTestProvider` that allows you to load custom tests from a designated directory should be sufficient.\n", + "\n", + "**The most important attribute for a test provider is its `namespace`.** This is a string that will be used to prefix test IDs in model documentation. This allows you to have multiple test providers with tests that can even share the same ID, but are distinguished by their namespace.\n", + "\n", + "Let's go ahead and load the custom tests from our `my_tests` directory:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.tests import LocalTestProvider\n", + "\n", + "# initialize the test provider with the tests folder we created earlier\n", + "my_test_provider = LocalTestProvider(tests_folder)\n", + "\n", + "vm.tests.register_test_provider(\n", + " namespace=\"my_test_provider\",\n", + " test_provider=my_test_provider,\n", + ")\n", + "# `my_test_provider.load_test()` will be called for any test ID that starts with `my_test_provider`\n", + "# e.g. `my_test_provider.ConfusionMatrix` will look for a function named `ConfusionMatrix` in `my_tests/ConfusionMatrix.py` file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Run test provider tests\n", + "\n", + "Now that we've set up the test provider, we can run any test that's located in the tests folder by using the `run_test()` method as with any other test:\n", + "\n", + "- For tests that reside in a test provider directory, the test ID will be the `namespace` specified when registering the provider, followed by the path to the test file relative to the tests folder.\n", + "- For example, the Confusion Matrix test we created earlier will have the test ID `my_test_provider.ConfusionMatrix`. 
You could organize the tests in subfolders, say `classification` and `regression`, and the test ID for the Confusion Matrix test would then be `my_test_provider.classification.ConfusionMatrix`.\n", + "\n", + "Let's go ahead and re-run the confusion matrix test by using the test ID `my_test_provider.ConfusionMatrix`. This should load the test from the test provider and run it as before.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " \"my_test_provider.ConfusionMatrix\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True},\n", + ")\n", + "\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Again, note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Add test results to documentation\n", + "\n", + "With our custom tests run and results logged to the ValidMind Platform, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation ([Need more help?](https://docs.validmind.ai/developer/model-documentation/work-with-test-results.html)):\n", + "\n", + "1. From the **Inventory** in the ValidMind Platform, go to the model you connected to earlier.\n", + "\n", + "2. In the left sidebar that appears for your model, click **Documentation**.\n", + "\n", + "3. Locate the Data Preparation section and click on **3.2 Model Evaluation** to expand that section.\n", + "\n", + "4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block.\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "5. Click **+** and then select **Test-Driven Block**:\n", + "\n", + " - In the search bar, type in `ConfusionMatrix`.\n", + " - Select the custom `ConfusionMatrix` tests you logged above:\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "6. Finally, click **Insert 2 Test Results to Document** to add the test results to the documentation.\n", + "\n", + " Confirm that the two individual results for the confusion matrix tests have been correctly inserted into section **3.2 Model Evaluation** of the documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this third notebook, you learned how to:\n", + "\n", + "- [ ] Implement a custom inline test\n", + "- [ ] Run and log your custom inline tests\n", + "- [ ] Use external custom test providers\n", + "- [ ] Run and log tests from your custom test providers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Finalize testing and documentation\n", + "\n", + "Now that you're proficient at using the ValidMind Library to run and log tests, let's put the last pieces in place to prepare our fully documented sample model for review: **[104 Finalize testing and documentation](104-finalize_testing_documentation.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/tutorials/model_development/104-finalize_testing_documentation.ipynb b/site/notebooks/tutorials/model_development/104-finalize_testing_documentation.ipynb new file mode 100644 index 0000000000..fd17733fcd --- /dev/null +++ b/site/notebooks/tutorials/model_development/104-finalize_testing_documentation.ipynb @@ -0,0 +1,966 @@ +{ + "cells": [ + { 
+ "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development — 104 Finalize testing and documentation\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our introductory notebook series. In this last notebook, finalize the testing and documentation of your model and have a fully documented sample model ready for review.\n", + "\n", + "We'll first use [`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests) previously covered in **[102 Start the model development process](102-start_development_process.ipynb)** to ensure that your custom test results generated in **[103 Integrate custom tests](103-integrate_custom_tests.ipynb)** are included in your documentation. Then, we'll view and update the configuration for the entire model documentation template to suit your needs.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Remove highly correlated features](#toc2_2_1_) \n", + " - [Train the model](#toc2_3_) \n", + " - [Initialize the ValidMind objects](#toc2_3_1_) \n", + " - [Assign predictions](#toc2_3_2_) \n", + " - [Add custom tests](#toc2_4_) \n", + " - [Implement custom inline test](#toc2_4_1_) \n", + " - [Add a local test provider](#toc2_4_2_) \n", + "- [Reconnect to ValidMind](#toc3_) \n", + "- [Include custom test results](#toc4_) \n", + "- [Documentation template configuration](#toc5_) \n", + " - [Update the config](#toc5_1_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Work with your model documentation](#toc7_1_) \n", + " - [Learn more](#toc7_2_) \n", + " - [Use cases](#toc7_2_1_) \n", + " - [More how-to guides and code samples](#toc7_2_2_) 
\n", + " - [Discover more learning resources](#toc7_2_3_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to finalize the testing and documentation for your sample model, you'll need to first have:\n", + "\n", + "- [ ] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [ ] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "- [ ] Learned how to import and initialize datasets for use with ValidMind\n", + "- [ ] Learned how to run and log default and custom tests with ValidMind, including from external test providers\n", + "- [ ] Inserted test-driven blocks for the results of the following tests into your model's documentation:\n", + " - [ ] `HighPearsonCorrelation:balanced_raw_dataset`\n", + " - [ ] `my_test_provider.ConfusionMatrix`\n", + " - [ ] `my_custom_tests.ConfusionMatrix:test_dataset_normalized`\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first three notebooks in this series:\n", + "\n", + "
    \n", + "
  1. 101 Set up ValidMind
  2. \n", + "
  3. 102 Start the model development process
  4. \n", + "
  5. 103 Integrate custom tests
  6. \n", + "
\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up\n", + "\n", + "This section should be very familiar to you now — as we performed the same actions in the previous two notebooks in this series." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "As usual, let's first connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Next, we'll import the same public [Bank Customer Churn Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle we used in 
the last notebook so that we have something to work with:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll apply a simple rebalancing technique to the dataset before continuing:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Remove highly correlated features\n", + "\n", + "Let's also quickly remove highly correlated features from the dataset using the output from a ValidMind test.\n", + "\n", + "As you learned previously, before we can run tests you'll need to initialize a ValidMind dataset object:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " 
target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our balanced dataset initialized, we can then run our test and utilize the output to help us identify the features we want to remove:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run HighPearsonCorrelation test with our balanced dataset as input and return a result object\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# From result object, extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract feature names from the list of strings\n", + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then re-initialize the dataset with a different `input_id` and the highly correlated features removed and re-run the test for confirmation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Remove the highly correlated features from the dataset\n", + "balanced_raw_no_age_df = 
balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Re-run the test with the reduced feature set\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train the model\n", + "\n", + "We'll then use ValidMind tests to train a simple logistic regression model on our prepared dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First encode the categorical features in our dataset with the highly correlated features removed\n", + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "# Split the input and target variables\n", + "X = balanced_raw_no_age_df.drop(\"Exited\", axis=1)\n", + "y = balanced_raw_no_age_df[\"Exited\"]\n", + "X_train, X_test, y_train, y_test = train_test_split(\n", + " X,\n", + " y,\n", + " test_size=0.2,\n", + " random_state=42,\n", + ")\n", + "\n", + "# Logistic Regression grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", \"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " 
\"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind objects\n", + "\n", + "Let's initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_df = X_train\n", + "train_df[\"Exited\"] = y_train\n", + "test_df = X_test\n", + "test_df[\"Exited\"] = y_test\n", + "\n", + "# Initialize the datasets into their own dataset objects\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "# Initialize a model object\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Assign predictions\n", + "\n", + "Once the model is registered, we'll assign predictions to the training and test datasets:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Add custom tests\n", + "\n", + "We'll also add the same custom tests we implemented in the previous 
notebook so that this session has access to the same custom inline test and local test provider." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Implement custom inline test\n", + "\n", + "Let's set up a custom inline test that calculates the confusion matrix for a binary classification model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First create a confusion matrix plot\n", + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "\n", + "# Get the predicted classes\n", + "y_pred = log_reg.predict(vm_test_ds.x)\n", + "\n", + "confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n", + "\n", + "cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + ")\n", + "cm_display.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the reusable ConfusionMatrix inline test with normalized matrix\n", + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model, normalize=False):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = 
dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " if normalize:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred, normalize=\"all\")\n", + " else:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset with normalize=True\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Add a local test provider\n", + "\n", + "Finally, let's save our custom inline test to our local test provider:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create custom tests folder\n", + "tests_folder = \"my_tests\"\n", + "\n", + "import os\n", + "\n", + "# create tests folder\n", + "os.makedirs(tests_folder, exist_ok=True)\n", + "\n", + "# remove existing tests\n", + "for f in os.listdir(tests_folder):\n", + " # remove files and pycache\n", + " if f.endswith(\".py\") or f == \"__pycache__\":\n", + " os.system(f\"rm -rf {tests_folder}/{f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save custom inline test to custom tests folder\n", + "confusion_matrix.save(\n", + " tests_folder,\n", + " imports=[\"import matplotlib.pyplot as plt\", \"from sklearn import metrics\"],\n", + ")" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register local test provider\n", + "from validmind.tests import LocalTestProvider\n", + "\n", + "# initialize the test provider with the tests folder we created earlier\n", + "my_test_provider = LocalTestProvider(tests_folder)\n", + "\n", + "vm.tests.register_test_provider(\n", + " namespace=\"my_test_provider\",\n", + " test_provider=my_test_provider,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Reconnect to ValidMind\n", + "\n", + "After you insert test-driven blocks into your model documentation, changes should persist and become available every time you call [`vm.preview_template()`](https://docs.validmind.ai/validmind/validmind.html#preview_template).\n", + "\n", + "However, you'll need to reload the connection to the ValidMind Platform if you have added test-driven blocks when the connection was already established using [`reload()`](https://docs.validmind.ai/validmind/validmind.html#reload):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.reload()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, when you run `preview_template()` again, the three test-driven blocks you added to your documentation in the last two notebooks in should show up in the template in sections **2.3 Correlations and Interactions** and **3.2 Model Evaluation**:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Include custom test results\n", + "\n", + "Since your custom test IDs are now part of your documentation template, you can now run tests for an entire section and all additional custom tests should be loaded without any issues.\n", + "\n", + "Let's run 
all tests in the Model Evaluation section of the documentation. Note that we have been running the sample custom confusion matrix with `normalize=True` to demonstrate the ability to provide custom parameters.\n", + "\n", + "In the **Run the model evaluation tests** section of **[102 Start the model development process](102-start_development_process.ipynb)**, you learned how to assign inputs to individual tests with [`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests). Assigning parameters is similar, you only need to assign a `params` dictionary to a given test ID, `my_test_provider.ConfusionMatrix` in this case.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_config = {\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\n", + " \"dataset\": vm_train_ds,\n", + " \"model\": vm_model,\n", + " },\n", + " },\n", + " \"my_test_provider.ConfusionMatrix\": {\n", + " \"params\": {\"normalize\": True},\n", + " \"inputs\": {\"dataset\": vm_test_ds, \"model\": vm_model},\n", + " },\n", + "}\n", + "results = vm.run_documentation_tests(\n", + " section=[\"model_evaluation\"],\n", + " inputs={\n", + " \"dataset\": vm_test_ds, # Any test that requires a single dataset will use vm_test_ds\n", + " \"model\": vm_model,\n", + " \"datasets\": (\n", + " vm_train_ds,\n", + " vm_test_ds,\n", + " ), # Any test that requires multiple datasets will use vm_train_ds and vm_test_ds\n", + " },\n", + " config=test_config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Documentation template configuration\n", + "\n", + "Let's call the utility function [`vm.get_test_suite().get_default_config()`](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestSuite.get_default_config) which will return the **default configuration for the entire 
documentation template as a dictionary:**\n", + "\n", + "- This configuration will contain all the test IDs and their default parameters.\n", + "- You can then modify this configuration as needed and pass it to `run_documentation_tests()` to run all tests in the documentation template if needed.\n", + "- You still have the option to continue running tests for one section at a time; `get_default_config()` simply provides a useful reference for providing default parameters to every test." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "model_test_suite = vm.get_test_suite()\n", + "config = model_test_suite.get_default_config()\n", + "print(\"Suite Config: \\n\", json.dumps(config, indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Update the config\n", + "\n", + "The default config does not assign any inputs to a test, but you can assign inputs to individual tests as needed depending on the datasets and models you want to pass to individual tests.\n", + "\n", + "For this particular documentation template (binary classification), the ValidMind Library provides a sample configuration that can be used to populate the entire model documentation using the following inputs as placeholders:\n", + "\n", + "- A **`raw_dataset`** raw dataset\n", + "- A **`train_dataset`** training dataset\n", + "- A **`test_dataset`** test dataset\n", + "- A trained **`model`** instance\n", + "\n", + "As part of updating the `config` you will need to ensure the correct `input_id`s are used in the final config passed to `run_documentation_tests()`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn\n", + "from validmind.utils import preview_test_config\n", + "\n", + "test_config = 
customer_churn.get_demo_test_config()\n", + "preview_test_config(test_config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using this sample configuration, let's finish populating model documentation by running all tests for the Model Development section of the documentation.\n", + "\n", + "Recall that the training and test datasets in our exercise have the following `input_id` values:\n", + "\n", + "- **`train_dataset_final`** for the training dataset\n", + "- **`test_dataset_final`** for the test dataset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"validmind.model_validation.ModelMetadata\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"validmind.data_validation.DatasetSplit\": {\n", + " \"inputs\": {\"datasets\": [\"train_dataset_final\", \"test_dataset_final\"]},\n", + " },\n", + " \"validmind.model_validation.sklearn.PopulationStabilityIndex\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\"num_bins\": 10, \"mode\": \"fixed\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ConfusionMatrix\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"my_test_provider.ConfusionMatrix\": {\n", + " \"inputs\": {\"dataset\": \"test_dataset_final\", \"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\": {\n", + " \"inputs\": {\"dataset\": \"test_dataset_final\", \"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"train_dataset_final\"}\n", + " },\n", + " 
\"validmind.model_validation.sklearn.ClassifierPerformance:out_of_sample\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"}\n", + " },\n", + " \"validmind.model_validation.sklearn.PrecisionRecallCurve\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ROCCurve\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.TrainingTestDegradation\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"metrics\": [\"accuracy\", \"precision\", \"recall\", \"f1\"],\n", + " \"max_threshold\": 0.1,\n", + " },\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumAccuracy\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.7},\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumF1Score\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.5},\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumROCAUCScore\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.5},\n", + " },\n", + " \"validmind.model_validation.sklearn.PermutationFeatureImportance\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.SHAPGlobalImportance\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"kernel_explainer_samples\": 10},\n", + " },\n", + " 
\"validmind.model_validation.sklearn.WeakspotsDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"thresholds\": {\"accuracy\": 0.75, \"precision\": 0.5, \"recall\": 0.5, \"f1\": 0.7}\n", + " },\n", + " },\n", + " \"validmind.model_validation.sklearn.OverfitDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\"cut_off_percentage\": 4},\n", + " },\n", + " \"validmind.model_validation.sklearn.RobustnessDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"scaling_factor_std_dev_list\": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],\n", + " \"accuracy_decay_threshold\": 4,\n", + " },\n", + " },\n", + "}\n", + "\n", + "\n", + "full_suite = vm.run_documentation_tests(\n", + " section=\"model_development\",\n", + " config=config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this final notebook, you learned how to:\n", + "\n", + "- [ ] Refresh the connection from the ValidMind Library to the ValidMind Platform after you've inserted test-driven blocks to your documentation\n", + "- [ ] Include custom test results in your model documentation\n", + "- [ ] View and configure the configuration for your model documentation template\n", + "\n", + "With our ValidMind for model development series of notebooks, you learned how to document a model end-to-end with the ValidMind Library by running through some common scenarios in a typical model development setting:\n", + "\n", + "- Running out-of-the-box tests\n", + "- Documenting your model by adding evidence to model documentation\n", + "- Extending the 
capabilities of the ValidMind Library by implementing custom tests\n", + "- Ensuring that the documentation is complete by running all tests in the documentation template" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Work with your model documentation\n", + "\n", + "Now that you've logged all your test results and generated a draft for your model documentation, head to the ValidMind Platform to make qualitative edits, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. **Learn more:** [Working with model documentation](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Learn more\n", + "\n", + "Now that you're familiar with the basics, you can explore the following notebooks to get a deeper understanding on how the ValidMind Library allows you generate model documentation for any use case:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Use cases\n", + "\n", + "- [Application scorecard demo](../../code_samples/credit_risk/application_scorecard_demo.ipynb)\n", + "- [Linear regression documentation demo](../../code_samples/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation demo](../../code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### More how-to guides and code samples\n", + "\n", + "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", + "- [In-depth guide for implementing custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test 
providers](../../code_samples/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [Configuring dataset features](../../how_to/configure_dataset_features.ipynb)\n", + "- [Introduction to unit and composite metrics](../../how_to/run_unit_metrics.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Discover more learning resources\n", + "\n", + "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", + "\n", + "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/tutorials/model_development/add-content-block.gif b/site/notebooks/tutorials/model_development/add-content-block.gif new file mode 100644 index 0000000000..daab9d44d8 Binary files /dev/null and b/site/notebooks/tutorials/model_development/add-content-block.gif differ diff --git a/site/notebooks/tutorials/model_development/high-pearson-correlation-block.png b/site/notebooks/tutorials/model_development/high-pearson-correlation-block.png new file mode 100644 index 0000000000..dbe44392d1 Binary files /dev/null and b/site/notebooks/tutorials/model_development/high-pearson-correlation-block.png differ diff --git a/site/notebooks/tutorials/model_development/my_tests_directory.png b/site/notebooks/tutorials/model_development/my_tests_directory.png new file mode 100644 index 0000000000..47baffe80e Binary files /dev/null and b/site/notebooks/tutorials/model_development/my_tests_directory.png differ diff --git 
a/site/notebooks/tutorials/model_development/selecting-confusion-matrix-test.png b/site/notebooks/tutorials/model_development/selecting-confusion-matrix-test.png new file mode 100644 index 0000000000..de27323a76 Binary files /dev/null and b/site/notebooks/tutorials/model_development/selecting-confusion-matrix-test.png differ diff --git a/site/notebooks/tutorials/model_development/selecting-high-pearson-correlation-test.png b/site/notebooks/tutorials/model_development/selecting-high-pearson-correlation-test.png new file mode 100644 index 0000000000..2c4b87c6bb Binary files /dev/null and b/site/notebooks/tutorials/model_development/selecting-high-pearson-correlation-test.png differ diff --git a/site/python-docs.zip b/site/python-docs.zip index 67d109e059..abbee90668 100644 Binary files a/site/python-docs.zip and b/site/python-docs.zip differ diff --git a/site/releases/2023/2023-aug-15/highlights.qmd b/site/releases/2023/2023-aug-15/highlights.qmd index 098b632895..a1688465f3 100644 --- a/site/releases/2023/2023-aug-15/highlights.qmd +++ b/site/releases/2023/2023-aug-15/highlights.qmd @@ -1,7 +1,7 @@ --- title: "August 15, 2023" aliases: - - ../../2023-aug-15/highlights.html + - /releases/2023-aug-15/highlights.html --- This release includes a number of improvements for the developer experience when using the {{< var validmind.developer >}}, along with a visual redesign of the {{< var validmind.platform >}}. 
diff --git a/site/releases/2023/2023-dec-13/highlights.qmd b/site/releases/2023/2023-dec-13/highlights.qmd index 45f602b09f..cd428ee024 100644 --- a/site/releases/2023/2023-dec-13/highlights.qmd +++ b/site/releases/2023/2023-dec-13/highlights.qmd @@ -1,7 +1,7 @@ --- title: "December 13, 2023" aliases: - - ../../2023-dec-13/highlights.html + - /releases/2023-dec-13/highlights.html --- ::: {.highlights} diff --git a/site/releases/2023/2023-nov-09/highlights.qmd b/site/releases/2023/2023-nov-09/highlights.qmd index e4fb70ebb7..db6c53dc19 100644 --- a/site/releases/2023/2023-nov-09/highlights.qmd +++ b/site/releases/2023/2023-nov-09/highlights.qmd @@ -1,7 +1,7 @@ --- title: "November 9, 2023" aliases: - - ../../2023-nov-09/highlights.html + - /releases/2023-nov-09/highlights.html --- This release introduces support for several new models, a new user onboarding guide and other {{< var validmind.platform >}} enhancements, and improved test descriptions in our user-facing documentation. diff --git a/site/releases/2023/2023-oct-25/highlights.qmd b/site/releases/2023/2023-oct-25/highlights.qmd index 89c76d9944..e7c14f294e 100644 --- a/site/releases/2023/2023-oct-25/highlights.qmd +++ b/site/releases/2023/2023-oct-25/highlights.qmd @@ -1,7 +1,7 @@ --- title: "October 25, 2023" aliases: - - ../../2023-oct-25/highlights.html + - /releases/2023-oct-25/highlights.html --- We've introduced new features to the {{< var vm.platform >}} that enable you to remove blocks of content from documentation and work with your settings more effectively. 
diff --git a/site/releases/2023/2023-sep-27/highlights.qmd b/site/releases/2023/2023-sep-27/highlights.qmd index c1eca18455..cd72321d06 100644 --- a/site/releases/2023/2023-sep-27/highlights.qmd +++ b/site/releases/2023/2023-sep-27/highlights.qmd @@ -1,7 +1,7 @@ --- title: "September 27, 2023" aliases: - - ../../2023-sep-27/highlights.html + - /releases/2023-sep-27/highlights.html listing: id: beta-announcement type: grid @@ -348,7 +348,7 @@ We enhanced the architecture and content of our external docs site to make the u ::: ::: {.w-50-ns} -[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd){.button} +[{{< var validmind.developer >}}](/developer/validmind-library.qmd){.button} ::: diff --git a/site/releases/2023/release-notes-2023-jul-24.qmd b/site/releases/2023/release-notes-2023-jul-24.qmd index 10be22b7eb..306db567ed 100644 --- a/site/releases/2023/release-notes-2023-jul-24.qmd +++ b/site/releases/2023/release-notes-2023-jul-24.qmd @@ -1,7 +1,7 @@ --- title: "July 24, 2023" aliases: - - ../release-notes-2023-jul-24.html + - /releases/release-notes-2023-jul-24.html --- This release improves the developer experience within the {{< var validmind.developer >}} and introduces an updated notebook to demonstrate support for NLP models, now using CatBoost for greater performance. diff --git a/site/releases/2023/release-notes-2023-jun-22.qmd b/site/releases/2023/release-notes-2023-jun-22.qmd index ab13a8e17d..45fe58a714 100644 --- a/site/releases/2023/release-notes-2023-jun-22.qmd +++ b/site/releases/2023/release-notes-2023-jun-22.qmd @@ -1,7 +1,7 @@ --- title: "June 22, 2023" aliases: - - ../release-notes-2023-jun-22.html + - /releases/release-notes-2023-jun-22.html --- This release includes a number of major enhancements to the {{< var validmind.developer >}} that will make it easier for users to edit templates and add custom tests that can be reused across templates. 
@@ -57,7 +57,7 @@ Templates now function as dynamic test suites, allowing you to identify all the This makes it easier to fill a pre-configured template (including boilerplates and spaces designated for documentation and test results) with a single command, instead of running multiple test suites. ::: {.tc} -[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd){.button .button-green} +[{{< var validmind.developer >}}](/developer/validmind-library.qmd){.button .button-green} ::: diff --git a/site/releases/2023/release-notes-2023-may-30.qmd b/site/releases/2023/release-notes-2023-may-30.qmd index c13b6f8af2..9185be4bf0 100644 --- a/site/releases/2023/release-notes-2023-may-30.qmd +++ b/site/releases/2023/release-notes-2023-may-30.qmd @@ -1,7 +1,7 @@ --- title: "May 30, 2023" aliases: - - ../release-notes-2023-may-30.html + - /releases/release-notes-2023-may-30.html --- This release includes enhanced plots with the Plotly package within the {{< var validmind.developer >}}, as well as support for export of model documentation to Word documents from the {{< var validmind.platform >}}. 
diff --git a/site/releases/2024/2024-aug-13/release-notes.qmd b/site/releases/2024/2024-aug-13/release-notes.qmd index 0c378c9f36..a56171468e 100644 --- a/site/releases/2024/2024-aug-13/release-notes.qmd +++ b/site/releases/2024/2024-aug-13/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "August 13, 2024" aliases: - - ../../2024-aug-13/release-notes.html + - /releases/2024-aug-13/release-notes.html --- diff --git a/site/releases/2024/2024-dec-06/release-notes.qmd b/site/releases/2024/2024-dec-06/release-notes.qmd index 1244b08eba..f50b3acd70 100644 --- a/site/releases/2024/2024-dec-06/release-notes.qmd +++ b/site/releases/2024/2024-dec-06/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "December 6, 2024" aliases: - - ../../2024-dec-06/release-notes.html + - /releases/2024-dec-06/release-notes.html filters: - tachyons - preview @@ -72,13 +72,13 @@ To reduce ambiguity and highlight the capabilities of our developer tools, as of :::: {.flex .flex-wrap .justify-around} -::: {.w-40-ns .tc} -[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd){.button .button-green} +::: {.w-30-ns .tc} +[{{< var validmind.developer >}}](/developer/validmind-library.qmd){.button .button-green} ::: -::: {.w-30-ns .tc} -[{{< var validmind.developer >}} reference](/validmind/validmind.html){.button .button-green target="_blank"} +::: {.w-40-ns .tc} +[{{< var validmind.developer >}} API reference](/validmind/validmind.html){.button .button-green target="_blank"} ::: @@ -243,7 +243,7 @@ Of note is the **{{< fa cube >}} Developers** drop-down menu, previously called [{{< var vm.product >}} product names](/about/contributing/style-guide/conventions.qmd#product-names){.button .button-green} -[{{< fa cube >}} Developers](/developer/get-started-validmind-library.qmd){.button .button-green} +[{{< fa cube >}} Developers](/developer/validmind-library.qmd){.button .button-green} ::: @@ -708,7 +708,7 @@ If more than one set of test results has been logged with 
the {{< var validmind. ::: ::: {.w-30-ns .tc} -[Work with test results](/developer/model-documentation/work-with-test-results.qmd#filter-historical-test-results){.button} +[Work with test results](/guide/model-documentation/work-with-test-results.qmd#filter-historical-test-results){.button} ::: @@ -1019,12 +1019,12 @@ Information on available analytics and setting up reporting has also moved into #### {{< fa cube >}} Developers -##### Documenting models + -:::: {.flex .flex-wrap .justify-around} + ##### Upgrade {{< var vm.product >}} diff --git a/site/releases/2024/2024-dec-24/release-notes.qmd b/site/releases/2024/2024-dec-24/release-notes.qmd index 2884f518e9..2a9e7616e3 100644 --- a/site/releases/2024/2024-dec-24/release-notes.qmd +++ b/site/releases/2024/2024-dec-24/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "December 24, 2024" aliases: - - ../../2024-dec-24/release-notes.html + - /releases/2024-dec-24/release-notes.html listing: - id: deep-dive type: grid @@ -421,7 +421,7 @@ The **{{< fa cube >}} Developers** portal also got a makeover, with a {{< var vm ![New developers portal theme](new-developer-portal.png){width=80% fig-alt="A screenshot of the new developers portal theme" .screenshot} ::: {.column-margin} -[{{< fa cube >}} Developers](/developer/get-started-validmind-library.qmd){.button} +[{{< fa cube >}} Developers](/developer/validmind-library.qmd){.button} ::: diff --git a/site/releases/2024/2024-feb-14/highlights.qmd b/site/releases/2024/2024-feb-14/highlights.qmd index 98a6bb8d34..1d30c780a4 100644 --- a/site/releases/2024/2024-feb-14/highlights.qmd +++ b/site/releases/2024/2024-feb-14/highlights.qmd @@ -1,7 +1,7 @@ --- title: "February 14, 2024" aliases: - - ../../2024-feb-14/highlights.html + - /releases/2024-feb-14/highlights.html --- We've improved the {{< var vm.product >}} user experience, from more supportive documentation templates, easier specification of inputs, and better filtering within the {{< var vm.developer >}}, to the ability to 
view which user ran actions within the {{< var vm.platform >}}. diff --git a/site/releases/2024/2024-jan-18/highlights.qmd b/site/releases/2024/2024-jan-18/highlights.qmd index b518cf3a15..14a7dcedbc 100644 --- a/site/releases/2024/2024-jan-18/highlights.qmd +++ b/site/releases/2024/2024-jan-18/highlights.qmd @@ -1,7 +1,7 @@ --- title: "January 18, 2024" aliases: - - ../../2024-jan-18/highlights.html + - /releases/2024-jan-18/highlights.html --- This release introduces a new dark mode to the {{< var validmind.platform >}}, along with new user and template management features, other enhancements, and bug fixes. diff --git a/site/releases/2024/2024-jan-26/highlights.qmd b/site/releases/2024/2024-jan-26/highlights.qmd index 9325503592..7112a6b7cc 100644 --- a/site/releases/2024/2024-jan-26/highlights.qmd +++ b/site/releases/2024/2024-jan-26/highlights.qmd @@ -1,7 +1,7 @@ --- title: "January 26, 2024" aliases: - - ../../2024-jan-26/highlights.html + - /releases/2024-jan-26/highlights.html --- This release includes numerous improvements to the {{< var vm.developer >}}, including new features for model and dataset initialization, easier testing, support for additional inputs and the Azure OpenAI API, updated notebooks, bug fixes, and much more. 
diff --git a/site/releases/2024/2024-jul-22/release-notes.qmd b/site/releases/2024/2024-jul-22/release-notes.qmd index a9706b5540..40f2ea4a0b 100644 --- a/site/releases/2024/2024-jul-22/release-notes.qmd +++ b/site/releases/2024/2024-jul-22/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "July 22, 2024" aliases: - - ../../2024-jul-22/release-notes.html + - /releases/2024-jul-22/release-notes.html listing: id: training type: grid diff --git a/site/releases/2024/2024-jun-10/release-notes.qmd b/site/releases/2024/2024-jun-10/release-notes.qmd index 5a0a1d9850..14d8e4435d 100644 --- a/site/releases/2024/2024-jun-10/release-notes.qmd +++ b/site/releases/2024/2024-jun-10/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "June 10, 2024" aliases: - - ../../2024-jun-10/release-notes.html + - /releases/2024-jun-10/release-notes.html --- ## {{< fa bullhorn >}} Release highlights diff --git a/site/releases/2024/2024-mar-27/highlights.qmd b/site/releases/2024/2024-mar-27/highlights.qmd index 400bdc6972..565941ff86 100644 --- a/site/releases/2024/2024-mar-27/highlights.qmd +++ b/site/releases/2024/2024-mar-27/highlights.qmd @@ -1,7 +1,7 @@ --- title: "March 27, 2024" aliases: - - ../../2024-mar-27/highlights.html + - /releases/2024-mar-27/highlights.html --- ## Release highlights @@ -29,7 +29,7 @@ Test result metadata includes: - User attribution to tell you who updated the test results - Relevant inputs associated with the test results -[View test result metadata](/developer/model-documentation/work-with-test-results.qmd#view-test-result-metadata){.button .button-green} +[View test result metadata](/guide/model-documentation/work-with-test-results.qmd#view-test-result-metadata){.button .button-green} ::: @@ -457,7 +457,7 @@ We improved our supported models documentation with additional information about ::: ::: {.w-30-ns .tc} -[Supported models](/developer/model-documentation/supported-models.qmd){.button} +[Supported models](/developer/supported-models.qmd){.button} ::: diff --git 
a/site/releases/2024/2024-may-22/release-notes.qmd b/site/releases/2024/2024-may-22/release-notes.qmd index 55224e2c6a..c4e7802e01 100644 --- a/site/releases/2024/2024-may-22/release-notes.qmd +++ b/site/releases/2024/2024-may-22/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "May 22, 2024" aliases: - - ../../2024-may-22/release-notes.html + - /releases/2024-may-22/release-notes.html listing: id: training type: grid diff --git a/site/releases/2024/2024-oct-22/release-notes.qmd b/site/releases/2024/2024-oct-22/release-notes.qmd index 2e21446ad9..11e4bd8c8e 100644 --- a/site/releases/2024/2024-oct-22/release-notes.qmd +++ b/site/releases/2024/2024-oct-22/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "October 22, 2024" aliases: - - ../../2024-oct-22/release-notes.html + - /releases/2024-oct-22/release-notes.html listing: id: training type: grid diff --git a/site/releases/2024/2024-sep-09/release-notes.qmd b/site/releases/2024/2024-sep-09/release-notes.qmd index 5713a11a6b..744771e23a 100644 --- a/site/releases/2024/2024-sep-09/release-notes.qmd +++ b/site/releases/2024/2024-sep-09/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "September 9, 2024" aliases: - - ../../2024-sep-09/release-notes.html + - /releases/2024-sep-09/release-notes.html --- We've expanded functionality in the {{< var vm.platform >}}, including a more extensible version of risk areas that allows you to customize guidelines associated with your validation templates, as well as the ability to reset your model workflows. 
diff --git a/site/releases/2024/2024-sep-25/release-notes.qmd b/site/releases/2024/2024-sep-25/release-notes.qmd index 6ba80fff10..9cd533409c 100644 --- a/site/releases/2024/2024-sep-25/release-notes.qmd +++ b/site/releases/2024/2024-sep-25/release-notes.qmd @@ -1,7 +1,7 @@ --- title: "September 25, 2024" aliases: - - ../../2024-sep-25/release-notes.html + - /releases/2024-sep-25/release-notes.html --- ::: {.highlights} diff --git a/site/releases/_how-to-upgrade.qmd b/site/releases/_how-to-upgrade.qmd index 561c30ce0d..2210e3fe82 100644 --- a/site/releases/_how-to-upgrade.qmd +++ b/site/releases/_how-to-upgrade.qmd @@ -11,7 +11,7 @@ To access the latest version of the {{< var validmind.platform >}},^[[Log in to #### {{< var validmind.developer >}} -To upgrade the {{< var validmind.developer >}}:^[[Get started with the {{< var validmind.developer >}}](/developer/get-started-validmind-library.qmd)] +To upgrade the {{< var validmind.developer >}}:^[[{{< var validmind.developer >}}](/developer/validmind-library.qmd)] 1. 
In your Jupyter Notebook: diff --git a/site/support/support.qmd b/site/support/support.qmd index 5b5b7cc5c5..7b7e56d0cd 100644 --- a/site/support/support.qmd +++ b/site/support/support.qmd @@ -2,7 +2,7 @@ title: "Support" date: last-modified aliases: - - ../guide/support.html + - /guide/support.html listing: - id: find-information type: grid diff --git a/site/support/troubleshooting.qmd b/site/support/troubleshooting.qmd index 150389b7bc..660d958638 100644 --- a/site/support/troubleshooting.qmd +++ b/site/support/troubleshooting.qmd @@ -3,7 +3,7 @@ title: "Troubleshooting" date: last-modified toc-depth: 2 aliases: - - ../guide/troubleshooting.html + - /guide/troubleshooting.html listing: id: more type: grid diff --git a/site/training/administrator-fundamentals/administrator-fundamentals.qmd b/site/training/administrator-fundamentals/administrator-fundamentals.qmd index 3e12969de6..a1b8a202b4 100644 --- a/site/training/administrator-fundamentals/administrator-fundamentals.qmd +++ b/site/training/administrator-fundamentals/administrator-fundamentals.qmd @@ -66,7 +66,7 @@ Be sure to return to this page afterwards. ::: -# You're in — let's show you around. {background-color="#083E44" background-image="../assets/home-hero.svg"} +# You're in — let's show you around. {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {background-iframe="https://app.prod.validmind.ai/settings" data-preload="yes"} @@ -109,7 +109,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. # PART 1 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Set up your organization {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Set up your organization {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -148,7 +148,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. 
::: :::: -# Onboard users {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Onboard users {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -195,7 +195,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. :::: ::: -# Manage roles and permissions {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Manage roles and permissions {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -248,7 +248,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. :::: ::: -# Manage groups {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Manage groups {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -283,7 +283,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. # PART 2 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Customize templates {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Customize templates {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -320,7 +320,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. ::: :::: -# Configure workflows {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Configure workflows {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} diff --git a/site/training/developer-fundamentals/developer-fundamentals.qmd b/site/training/developer-fundamentals/developer-fundamentals.qmd index 56d0e3845b..04b86ad528 100644 --- a/site/training/developer-fundamentals/developer-fundamentals.qmd +++ b/site/training/developer-fundamentals/developer-fundamentals.qmd @@ -148,7 +148,7 @@ When you're done, click [{{< fa chevron-right >}}]() to
continue. # PART 1 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Initialize the {{< var vm.developer >}} {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Initialize the {{< var vm.developer >}} {background-color="#083E44" background-image="/training/assets/home-hero.svg"} @@ -162,7 +162,7 @@ When you are done, return to this page and click [{{< fa chevron-right >}}]() to ::: :::: -# Start the model development process {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Start the model development process {background-color="#083E44" background-image="/training/assets/home-hero.svg"} @@ -176,7 +176,7 @@ When you reach **Add individual test results to model documentation**, return to ::: :::: -# Edit model documentation {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Edit model documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -240,7 +240,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. ::: :::: -# Collaborate with others {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Collaborate with others {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -285,7 +285,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. 
# PART 2 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Train a model {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Train a model {background-color="#083E44" background-image="/training/assets/home-hero.svg"} @@ -299,7 +299,7 @@ When you are done, return to this page and click [{{< fa chevron-right >}}]() to ::: :::: -# Implement custom tests and integrate external test providers {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Implement custom tests and integrate external test providers {background-color="#083E44" background-image="/training/assets/home-hero.svg"} @@ -313,7 +313,7 @@ When you are done, return to this page and click [{{< fa chevron-right >}}]() to ::: :::: -# Finalize testing and documentation {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Finalize testing and documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} @@ -330,7 +330,7 @@ When you are done, return to this page and click [{{< fa chevron-right >}}]() to # PART 3 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# View documentation activity {background-color="#083E44" background-image="../assets/home-hero.svg"} +# View documentation activity {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -385,7 +385,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. 
::: :::: -# Submit for approval {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Submit for approval {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} diff --git a/site/training/training-templates/course-slides.qmd b/site/training/training-templates/course-slides.qmd index b8d57639c0..c4e7887bcc 100644 --- a/site/training/training-templates/course-slides.qmd +++ b/site/training/training-templates/course-slides.qmd @@ -61,7 +61,7 @@ Be sure to return to this page afterwards. ::: -# iFrame embed right {background-color="#083E44" background-image="../assets/home-hero.svg"} +# iFrame embed right {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {background-iframe="https://app.prod.validmind.ai/" data-preload="yes"} @@ -80,7 +80,7 @@ As you can see, you need to get creative
with some of your `
`s to determ :::: -# iFrame embed bottom {background-color="#083E44" background-image="../assets/home-hero.svg"} +# iFrame embed bottom {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html" background-interactive="yes" data-preload="yes"} @@ -92,7 +92,7 @@ You can fully interact with the page contents. ::: :::: -# iFrame embed footer {background-color="#083E44" background-image="../assets/home-hero.svg"} +# iFrame embed footer {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {background-iframe="https://app.prod.validmind.ai/settings" background-interactive="true" data-preload="yes"} @@ -108,7 +108,7 @@ This will also stretch the overlay to 95%. :::: ::: -# Scrollable single user guide {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Scrollable single user guide {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -153,7 +153,7 @@ Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed quis sapien blandit :::: -# Scrollable tabset user guides {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Scrollable tabset user guides {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} diff --git a/site/training/training.qmd b/site/training/training.qmd index 6b9d443e6a..4ed4c33538 100644 --- a/site/training/training.qmd +++ b/site/training/training.qmd @@ -4,10 +4,10 @@ sidebar: false repo-actions: false toc: false aliases: - - ../training.html + - /training.html - index.html - training-overview.html - - ../guide/get-started-sandbox.html + - /guide/get-started-sandbox.html listing: - id: training-curriculum type: grid diff --git a/site/training/validator-fundamentals/validator-fundamentals.qmd b/site/training/validator-fundamentals/validator-fundamentals.qmd index a24975c961..75feca543c 100644 --- a/site/training/validator-fundamentals/validator-fundamentals.qmd +++ b/site/training/validator-fundamentals/validator-fundamentals.qmd @@ -130,7 +130,7 @@ Explore existing reports,
then click [{{< fa chevron-right >}}]() to continu # PART 1 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Review documentation {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Review documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -209,7 +209,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. ::: :::: -# Analyze test results {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Analyze test results {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -248,7 +248,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. ::: :::: -# Prepare validation reports {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Prepare validation reports {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -366,7 +366,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. # PART 2 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} -# Track issue resolution {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Track issue resolution {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable} @@ -403,7 +403,7 @@ When you are done, click [{{< fa chevron-right >}}]() to continue. ::: :::: -# Submit for review and approval {background-color="#083E44" background-image="../assets/home-hero.svg"} +# Submit for review and approval {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {.scrollable}