diff --git a/.github/actions/demo-notebook/action.yml b/.github/actions/demo-notebook/action.yml index 8e014230b8..898123341b 100644 --- a/.github/actions/demo-notebook/action.yml +++ b/.github/actions/demo-notebook/action.yml @@ -1,4 +1,4 @@ -name: Execute demo notebook +name: Execute demo notebooks description: Installs python3, validmind, checks dependencies then executes ONLY the Intro for Model Developers notebook with development heap tracking inputs: @@ -24,6 +24,11 @@ runs: pip install shap==0.44.1 pip install anywidget + - name: Register validmind kernel + shell: bash + run: | + python -m ipykernel install --user --name validmind --display-name "ValidMind Library" + - name: Ensure .env file is available shell: bash id: find_env @@ -33,14 +38,14 @@ runs: exit 1 fi - - name: Execute ONLY the Intro for Model Developers notebook with heap development + - name: Execute ONLY the ValidMind for model development series with heap development shell: bash if: ${{ steps.find_env.outcome == 'success' }} run: | cd site source ../${{ inputs.env_file }} - quarto render --profile exe-demo notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb &> render_errors.log || { - echo "Execute for intro_for_model_developers_EXECUTED.ipynb failed"; + quarto render --profile exe-demo notebooks/EXECUTED/model_development &> render_errors.log || { + echo "Execute for ValidMind for model development series failed"; cat render_errors.log; exit 1; } diff --git a/.github/actions/prod-notebook/action.yml b/.github/actions/prod-notebook/action.yml index ce8612da46..fd86151175 100644 --- a/.github/actions/prod-notebook/action.yml +++ b/.github/actions/prod-notebook/action.yml @@ -1,4 +1,4 @@ -name: Execute prod notebook +name: Execute prod notebooks description: Installs python3, validmind, checks dependencies then executes ONLY the Intro for Model Developers notebook with production heap tracking inputs: @@ -24,6 +24,11 @@ runs: pip install shap==0.44.1 pip install anywidget + - name: 
Register validmind kernel + shell: bash + run: | + python -m ipykernel install --user --name validmind --display-name "ValidMind Library" + - name: Ensure .env file is available shell: bash id: find_env @@ -33,14 +38,14 @@ runs: exit 1 fi - - name: Execute ONLY the Intro for Model Developers notebook with heap production + - name: Execute ONLY the ValidMind for model development series with heap production shell: bash if: ${{ steps.find_env.outcome == 'success' }} run: | cd site source ../${{ inputs.env_file }} - quarto render --profile exe-prod notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb &> render_errors.log || { - echo "Execute for intro_for_model_developers_EXECUTED.ipynb failed"; + quarto render --profile exe-prod notebooks/EXECUTED/model_development &> render_errors.log || { + echo "Execute for ValidMind for model development series failed"; cat render_errors.log; exit 1; } \ No newline at end of file diff --git a/.github/actions/staging-notebook/action.yml b/.github/actions/staging-notebook/action.yml index f53d395380..d5a2b8dff3 100644 --- a/.github/actions/staging-notebook/action.yml +++ b/.github/actions/staging-notebook/action.yml @@ -1,4 +1,4 @@ -name: Execute staging notebook +name: Execute staging notebooks description: Installs python3, validmind, checks dependencies then executes ONLY the Intro for Model Developers notebook with staging heap tracking inputs: @@ -24,6 +24,11 @@ runs: pip install shap==0.44.1 pip install anywidget + - name: Register validmind kernel + shell: bash + run: | + python -m ipykernel install --user --name validmind --display-name "ValidMind Library" + - name: Ensure .env file is available shell: bash id: find_env @@ -33,14 +38,14 @@ runs: exit 1 fi - - name: Execute ONLY the Intro for Model Developers notebook with heap staging + - name: Execute ONLY the ValidMind for model development series with heap staging shell: bash if: ${{ steps.find_env.outcome == 'success' }} run: | cd site source ../${{ 
inputs.env_file }} - quarto render --profile exe-staging notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb &> render_errors.log || { - echo "Execute for intro_for_model_developers_EXECUTED.ipynb failed"; + quarto render --profile exe-staging notebooks/EXECUTED/model_development &> render_errors.log || { + echo "Execute for ValidMind for model development series failed"; cat render_errors.log; exit 1; } \ No newline at end of file diff --git a/.github/workflows/deploy-docs-prod.yaml b/.github/workflows/deploy-docs-prod.yaml index b7b11ae178..df39fff357 100644 --- a/.github/workflows/deploy-docs-prod.yaml +++ b/.github/workflows/deploy-docs-prod.yaml @@ -59,7 +59,7 @@ jobs: cat .env # Only execute the prod notebook if .env file is created - - name: Execute prod Intro for Model Developers notebook + - name: Execute prod ValidMind for model development series if: ${{ steps.create_env.outcome == 'success' }} uses: ./.github/actions/prod-notebook id: execute-prod-notebook diff --git a/.github/workflows/deploy-docs-staging.yaml b/.github/workflows/deploy-docs-staging.yaml index c5ddf48fd3..c67d75cf04 100644 --- a/.github/workflows/deploy-docs-staging.yaml +++ b/.github/workflows/deploy-docs-staging.yaml @@ -49,7 +49,7 @@ jobs: cat .env # Only execute the staging notebook if .env file is created - - name: Execute staging Intro for Model Developers notebook + - name: Execute staging ValidMind for model development series if: ${{ steps.create_env.outcome == 'success' }} uses: ./.github/actions/staging-notebook id: execute-staging-notebook diff --git a/.github/workflows/validate-docs-site.yaml b/.github/workflows/validate-docs-site.yaml index 8b014ebebc..13e500f8c9 100644 --- a/.github/workflows/validate-docs-site.yaml +++ b/.github/workflows/validate-docs-site.yaml @@ -72,8 +72,8 @@ jobs: cat .env # Only execute the demo notebook if .env file is created - - name: Execute demo Intro for Model Developers notebook - if: ${{ env.ENABLE_DEMO_NOTEBOOK == 'true' && 
steps.create_env.outcome == 'success' }} + - name: Execute demo ValidMind for model development series + if: ${{ vars.ENABLE_DEMO_NOTEBOOK == 'true' && steps.create_env.outcome == 'success' }} uses: ./.github/actions/demo-notebook id: execute-demo-notebook with: diff --git a/site/Makefile b/site/Makefile index 01d212d294..31ed939b9d 100644 --- a/site/Makefile +++ b/site/Makefile @@ -2,7 +2,7 @@ DEST_DIR_NB := notebooks DEST_DIR_PYTHON := validmind DEST_DIR_TESTS := tests -FILE_PATH := notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb +FILE_PATH := notebooks/EXECUTED/model_development GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) INSTALLATION_BRANCH := main LIBRARY_BRANCH ?= $(or $(BRANCH),main) @@ -43,6 +43,7 @@ clean: @echo "\nDeleting copies of files sourced from other repos ..." rm -rf $(SRC_DIR) rm -rf $(SRC_ROOT) + rm -rf notebooks/EXECUTED/model_development/ # Prompt for a branch or release tag and clone the repository clone: @@ -122,7 +123,7 @@ docs-site: get-source quarto render --profile production @$(MAKE) execute PROFILE=exe-prod -# Will default to `exe-demo` profile & the `notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb` if no input provided +# Will default to `exe-demo` profile & the `notebooks/EXECUTED/model_development` if no input provided execute: quarto render --profile $(PROFILE) $(FILE_PATH) @@ -135,8 +136,9 @@ notebooks: @rm -f notebooks.zip @rm -rf $(DEST_DIR_NB)/ && mkdir -p $(DEST_DIR_NB) @cp -r $(SRC_DIR)/notebooks/. $(DEST_DIR_NB)/ - @echo "Duplicating notebooks/tutorials/intro_for_model_developers.ipynb for execution" - @cp notebooks/tutorials/intro_for_model_developers.ipynb notebooks/tutorials/intro_for_model_developers_EXECUTED.ipynb + @echo "Duplicating all files from notebooks/tutorials/model_development/ for execution" + @mkdir -p notebooks/EXECUTED/model_development/ + @cp -r notebooks/tutorials/model_development/* notebooks/EXECUTED/model_development/ @echo "Copying LICENSE into notebooks ..." 
@cp -r $(SRC_DIR)/LICENSE $(DEST_DIR_NB)/ @rm -rf $(DEST_DIR_NB)/code_sharing diff --git a/site/_quarto.yml b/site/_quarto.yml index 79e5e5297b..5bbff296b5 100644 --- a/site/_quarto.yml +++ b/site/_quarto.yml @@ -92,6 +92,8 @@ website: file: guide/guides.qmd - text: "{{< fa envelope-open-text >}} Support" file: support/support.qmd + - text: "{{< fa bullhorn >}} Releases" + file: releases/all-releases.qmd - text: "---" - text: "{{< fa cube >}} Python Library" - text: "{{< fa code >}} {{< var validmind.developer >}}" @@ -105,18 +107,18 @@ website: # file: https://validmind.com/ # target: _blank # TRAINING MENU FOR ACADEMY SECTION - - text: "{{< fa graduation-cap >}} Training" - menu: - - text: "{{< fa house >}} ValidMind Academy" - file: training/training.qmd - - text: "---" - - text: "{{< fa building-columns >}} Fundamentals" - - text: "{{< fa gear >}} For Administrators" - file: training/administrator-fundamentals/administrator-fundamentals-register.qmd - - text: "{{< fa code >}} For Developers" - file: training/developer-fundamentals/developer-fundamentals-register.qmd - - text: "{{< fa user-check >}} For Validators" - file: training/validator-fundamentals/validator-fundamentals-register.qmd + # - text: "{{< fa graduation-cap >}} Training" + # menu: + # - text: "{{< fa house >}} ValidMind Academy" + # file: training/training.qmd + # - text: "---" + # - text: "{{< fa building-columns >}} Fundamentals" + # - text: "{{< fa gear >}} For Administrators" + # file: training/administrator-fundamentals/administrator-fundamentals-register.qmd + # - text: "{{< fa code >}} For Developers" + # file: training/developer-fundamentals/developer-fundamentals-register.qmd + # - text: "{{< fa user-check >}} For Validators" + # file: training/validator-fundamentals/validator-fundamentals-register.qmd - text: "Log In" menu: - text: "Public Internet" diff --git a/site/about/fine-print/data-privacy-policy.qmd b/site/about/fine-print/data-privacy-policy.qmd index 3c19f6f687..f855e6dc74 
100644 --- a/site/about/fine-print/data-privacy-policy.qmd +++ b/site/about/fine-print/data-privacy-policy.qmd @@ -5,7 +5,7 @@ date: last-modified listing: id: legal-ease type: grid - grid-columns: 2 + grid-columns: 1 contents: - path: https://validmind.com/about/legal/ title: "Legal Ease" @@ -20,6 +20,17 @@ This page outlines {{< var vm.product >}}'s data privacy policy, explaining how Our goal is to provide transparency about our data handling practices and to demonstrate our commitment to protecting your privacy and data security. +::: {.column-margin} +[commitment to transparency]{.smallcaps .pink} + +Understanding our policies shouldn’t feel like deciphering code, so we’ve made our legal texts as clear and accessible as possible: +

+ +::: {#legal-ease} +::: + +::: + ## What is {{< var vm.product >}}'s data privacy policy? The key points of our data privacy policy include: @@ -63,10 +74,3 @@ When you generate documentation or run tests, {{< var vm.product >}} imports the The {{< var validmind.developer >}} does not send any personally identifiable information (PII) through our {{< var validmind.api >}}. -## A commitment to transparency - -Understanding our policies shouldn’t feel like deciphering code, so we’ve made our legal texts as clear and accessible as possible: - -::: {#legal-ease} -::: - diff --git a/site/developer/validmind-library.qmd b/site/developer/validmind-library.qmd index 818d657eff..23e809b765 100644 --- a/site/developer/validmind-library.qmd +++ b/site/developer/validmind-library.qmd @@ -19,8 +19,6 @@ listing: - path: https://youtu.be/rIR8Mql7eGs title: "{{< fa brands youtube >}} {{< var vm.product >}} QuickStart" description: "Watch the walkthrough on YouTube: `https://youtu.be/rIR8Mql7eGs`" - # - ../notebooks/tutorials/intro_for_model_developers.ipynb -# - developer-getting-started-video.qmd - id: model-development type: grid grid-columns: 2 diff --git a/site/guide/model-documentation/_test-result-metadata.qmd b/site/guide/model-documentation/_test-result-metadata.qmd new file mode 100644 index 0000000000..10d8d0e95c --- /dev/null +++ b/site/guide/model-documentation/_test-result-metadata.qmd @@ -0,0 +1,5 @@ +After you have added a test result to your document, you can view the following information attached to the result: + +- History of values for the result +- Which users wrote those results +- Relevant inputs associated with the result \ No newline at end of file diff --git a/site/guide/model-documentation/_view-test-result-metadata.qmd b/site/guide/model-documentation/_view-test-result-metadata.qmd new file mode 100644 index 0000000000..2d3a0b006b --- /dev/null +++ b/site/guide/model-documentation/_view-test-result-metadata.qmd @@ -0,0 +1,8 @@ +1. 
Locate the test result whose metadata you want to view. + +1. Hover over the test result until the top-right hand corner menu appears and select **{{< fa clock >}} See Timeline**: + + - The [Active]{.green-bg} (most recent) test result is highlighted. + - On the test result timeline, click on the **{{< fa chevron-down >}}** associated with a test run to expand for details, such as **Model Information** or **Dataset Information**. + + When you are done, you can either click **Cancel** or **{{< fa x >}}** to close the metadata menu. \ No newline at end of file diff --git a/site/guide/model-documentation/content_blocks/_generate-with-ai.qmd b/site/guide/model-documentation/content_blocks/_generate-with-ai.qmd new file mode 100644 index 0000000000..b8079a3689 --- /dev/null +++ b/site/guide/model-documentation/content_blocks/_generate-with-ai.qmd @@ -0,0 +1,38 @@ + +:::: {.content-visible unless-format="revealjs"} +1. Click **{{< fa diamond >}} [beta]{.smallcaps} (Generate Text with AI)** in the toolbar while editing a content block. + +1. Enter an optional prompt to guide the output, then click **{{< fa check >}} Generate** to compose a draft for review. + +1. Review the draft composed by the {{< var vm.product >}} AI Content Builder for accuracy and relevance, then: + + - Click **{{< fa download >}} Accept Text** to insert the draft into your content block. + - Click **{{< fa rotate >}} Try Again** to regenerate a different draft. + - Click **Cancel** to discard the draft and return to your documentation section. + +1. After you insert the AI-generated draft, click on the text box to make the necessary edits and adjustments to your copy: + + - Ensure that content is in compliance with the quality guidelines outlined by your organization. + - Use the content editing toolbar^[[Content editing toolbar](#content-editing-toolbar)] just as you would with any other text block. 
+ +![Generating content with AI within a simple text block](/guide/model-documentation/generate-with-ai.gif){width=90% fig-alt="An animation that showcases the Generate with AI feature within a simple text block" .screenshot} + +:::: + + +:::: {.content-hidden unless-format="revealjs"} +1. Click **{{< fa diamond >}} [beta]{.smallcaps} (Generate Text with AI)** in the toolbar while editing a content block. + +1. Enter an optional prompt to guide the output, then click **{{< fa check >}} Generate** to compose a draft for review. + +1. Review the draft composed by the {{< var vm.product >}} AI Content Builder for accuracy and relevance, then: + + - Click **{{< fa download >}} Accept Text** to insert the draft into your content block. + - Click **{{< fa rotate >}} Try Again** to regenerate a different draft. + - Click **Cancel** to discard the draft and return to your documentation section. + +1. After you insert the AI-generated draft, click on the text box to make the necessary edits and adjustments to your copy, ensure that content is in compliance with the quality guidelines outlined by your organization. 
+ +![Generating content with AI within a simple text block](/guide/model-documentation/generate-with-ai.gif){width=90% fig-alt="An animation that showcases the Generate with AI feature within a simple text block" .screenshot} + +:::: \ No newline at end of file diff --git a/site/guide/model-documentation/test-run-details.gif b/site/guide/model-documentation/test-run-details.gif deleted file mode 100644 index 738bf68576..0000000000 Binary files a/site/guide/model-documentation/test-run-details.gif and /dev/null differ diff --git a/site/guide/model-documentation/work-with-content-blocks.qmd b/site/guide/model-documentation/work-with-content-blocks.qmd index 2bbcb38ce4..29c83a3e44 100644 --- a/site/guide/model-documentation/work-with-content-blocks.qmd +++ b/site/guide/model-documentation/work-with-content-blocks.qmd @@ -5,9 +5,9 @@ aliases: - /guide/work-with-content-blocks.html --- -Make edits to your model documentation, validation reports, or ongoing monitoring plans by adding or removing content blocks directly in the online editor. +Make edits to your model documentation, validation reports, or ongoing monitoring plans by adding or removing content blocks directly in the online editor. -## What are content blocks? +## What are content blocks? Content blocks provide you with sections that are part of a template, and are used in model documentation, validation reports, and ongoing monitoring plans. @@ -90,24 +90,9 @@ Generating content drafts for your model documentation works best after you've l While editing a simple text block, you can have {{< var vm.product >}} assist you with generating content drafts: -1. Click **{{< fa diamond >}} [beta]{.smallcaps} (Generate Text with AI)** in the toolbar while editing a content block. +{{< include content_blocks/_generate-with-ai.qmd >}} -2. Enter an optional prompt to guide the output, then click **{{< fa check >}} Generate** to compose a draft for review. - -3. 
Review the draft composed by the {{< var vm.product >}} AI Content Builder for accuracy and relevance, then: - - - Click **{{< fa download >}} Accept Text** to insert the draft into your content block. - - Click **{{< fa rotate >}} Try Again** to regenerate a different draft.g - - Click **Cancel** to discard the draft and return to your documentation section. - -4. After you insert the AI-generated draft, click on the text box to make the necessary edits and adjustments to your copy: - - - Ensure that content is in compliance with the quality guidelines outlined by your organization. - - Use the content editing toolbar[^9] just as you would with any other text block. - -![Generating content with AI within a simple text block](generate-with-ai.gif){width=90% fig-alt="An animation that showcases the Generate with AI feature within a simple text block" .screenshot} - -When generating content drafts with AI, accepted versions and edits are retained in your {{< fa wifi >}} Model Activity[^10] just like other updates to your documentation, reports, or plans. +When generating content drafts with AI, accepted versions and edits are retained in your {{< fa wifi >}} Model Activity[^9] just like other updates to your documentation, reports, or plans. ## Remove content blocks @@ -118,7 +103,7 @@ Test-driven or metric over time blocks can be re-added later on but **text block 1. In the left sidebar, click **{{< fa cubes >}} Inventory**. -2. Select a model or find your model by applying a filter or searching for it.[^11] +2. Select a model or find your model by applying a filter or searching for it.[^10] 3. In the left sidebar that appears for your model, click **{{< fa book-open >}} Documentation**, **{{< fa shield >}} Validation Report**, or **{{< fa desktop >}} Ongoing Monitoring**. 
@@ -165,8 +150,6 @@ Test-driven or metric over time blocks can be re-added later on but **text block [^8]: [Run tests and test suites](/developer/model-testing/testing-overview.qmd) -[^9]: [Content editing toolbar](#content-editing-toolbar) - -[^10]: [View model activity](/guide/model-inventory/view-model-activity.qmd) +[^9]: [View model activity](/guide/model-inventory/view-model-activity.qmd) -[^11]: [Working with the model inventory](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models) +[^10]: [Working with the model inventory](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models) diff --git a/site/guide/model-documentation/work-with-test-results.qmd b/site/guide/model-documentation/work-with-test-results.qmd index c68ab9ad47..640d929f21 100644 --- a/site/guide/model-documentation/work-with-test-results.qmd +++ b/site/guide/model-documentation/work-with-test-results.qmd @@ -43,11 +43,7 @@ Once generated via the {{< var validmind.developer >}}, view and add the test re ## View test result metadata -After you have added a test result to your document, you can view the following information attached to the result: - -- History of values for the result -- What users wrote those results -- Relevant inputs associated with the result +{{< include /guide/model-documentation/_test-result-metadata.qmd >}} 1. In the left sidebar, click **{{< fa cubes >}} Inventory**. @@ -55,14 +51,7 @@ After you have added a test result to your document, you can view the following 3. In the left sidebar that appears for your model, click **{{< fa book-open >}} Documentation**, **{{< fa shield >}} Validation Report**, or **{{< fa desktop >}} Ongoing Monitoring**. -4. Locate the test result whose metadata you want to view. - -5. Under the test result's name, click on the row indicating the currently [Active]{.green-bg} test result. 
- - - On the test result timeline, click on the **{{< fa chevron-down >}}** associated with a test run to expand for details. - - When you are done, you can either click **Cancel** or **{{< fa x >}}** to close the metadata menu. - - ![Detail expansion of test runs on the test result timeline](test-run-details.gif){width=85% fig-alt="A gif showcasing detail expansion of test runs on the test result timeline" .screenshot} +{{< include /guide/model-documentation/_view-test-result-metadata.qmd >}} #### Filter historical test results diff --git a/site/guide/model-inventory/_view-model-activity-steps.qmd b/site/guide/model-inventory/_view-model-activity-steps.qmd index de086b47db..954de8cdf3 100644 --- a/site/guide/model-inventory/_view-model-activity-steps.qmd +++ b/site/guide/model-inventory/_view-model-activity-steps.qmd @@ -12,7 +12,7 @@ To view model activity: :::: {.content-hidden unless-format="revealjs"} 1. In the left sidebar, click **{{< fa cubes >}} Inventory**. -1. Select a model or [find your model by applying a filter or searching for it](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models). +1. Select a model or [find your model by applying a filter or searching for it](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models){target="_blank"}. 1. In the expanded sidebar that appears for your model, click **{{< fa wifi >}} Model Activity**. 
:::: diff --git a/site/notebooks.zip b/site/notebooks.zip index 6190e6637c..972005fa8d 100644 Binary files a/site/notebooks.zip and b/site/notebooks.zip differ diff --git a/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb b/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb new file mode 100644 index 0000000000..46a002a83d --- /dev/null +++ b/site/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb @@ -0,0 +1,427 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b6fa2ac0", + "metadata": {}, + "source": [ + "# ValidMind for model development 1 — Set up the ValidMind Library\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process based on common model development scenarios with our series of four introductory notebooks. This first notebook walks you through the initial setup of the ValidMind Library.\n", + "\n", + "These notebooks use a binary classification model as an example, but the same principles shown here apply to other model types." 
+ ] + }, + { + "cell_type": "markdown", + "id": "fe2e0eca", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Introduction](#toc1_) \n", + "- [About ValidMind](#toc2_) \n", + " - [Before you begin](#toc2_1_) \n", + " - [New to ValidMind?](#toc2_2_) \n", + " - [Key concepts](#toc2_3_) \n", + "- [Initializing the ValidMind Library](#toc3_) \n", + " - [Install the ValidMind Library](#toc3_1_) \n", + " - [Initialize the ValidMind Library](#toc3_2_) \n", + " - [Get your code snippet](#toc3_2_1_) \n", + "- [Getting to know ValidMind](#toc4_) \n", + " - [Preview the documentation template](#toc4_1_) \n", + " - [View model documentation in the ValidMind Platform](#toc4_1_1_) \n", + " - [Explore available tests](#toc4_2_) \n", + "- [Upgrade ValidMind](#toc5_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Start the model development process](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "id": "814da22c", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Introduction\n", + "\n", + "Model development aims to produce a fit-for-purpose *champion model* by conducting thorough testing and analysis, supporting the capabilities of the model with evidence in the form of documentation and test results. 
Model documentation should be clear and comprehensive, ideally following a structure or template covering all aspects of compliance with model risk regulation.\n", + "\n", + "A *binary classification model* is a type of predictive model used in churn analysis to identify customers who are likely to leave a service or subscription by analyzing various behavioral, transactional, and demographic factors.\n", + "\n", + "- This model helps businesses take proactive measures to retain at-risk customers by offering personalized incentives, improving customer service, or adjusting pricing strategies.\n", + "- Effective validation of a churn prediction model ensures that businesses can accurately identify potential churners, optimize retention efforts, and enhance overall customer satisfaction while minimizing revenue loss." + ] + }, + { + "cell_type": "markdown", + "id": "4b966a95", + "metadata": {}, + "source": [ + "\n", + "\n", + "## About ValidMind\n", + "\n", + "ValidMind is a suite of tools for managing model risk, including risk associated with AI and statistical models. \n", + "\n", + "You use the ValidMind Library to automate documentation and validation tests, and then use the ValidMind Platform to collaborate on model documentation. Together, these products simplify model risk management, facilitate compliance with regulations and institutional standards, and enhance collaboration between yourself and model validators." + ] + }, + { + "cell_type": "markdown", + "id": "87936431", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Before you begin\n", + "\n", + "This notebook assumes you have basic familiarity with Python, including an understanding of how functions work. If you are new to Python, you can still run the notebook but we recommend further familiarizing yourself with the language. \n", + "\n", + "If you encounter errors due to missing modules in your Python environment, install the modules with `pip install`, and then re-run the notebook. 
For more help, refer to [Installing Python Modules](https://docs.python.org/3/installing/index.html)." + ] + }, + { + "cell_type": "markdown", + "id": "cb9f8dc1", + "metadata": {}, + "source": [ + "\n", + "\n", + "### New to ValidMind?\n", + "\n", + "If you haven't already seen our documentation on the [ValidMind Library](https://docs.validmind.ai/developer/validmind-library.html), we recommend you begin by exploring the available resources in this section. There, you can learn more about documenting models and running tests, as well as find code samples and our Python Library API reference.\n", + "\n", + "
For access to all features available in this notebook, create a free ValidMind account.\n", + "

\n", + "Signing up is FREE — Register with ValidMind
" + ] + }, + { + "cell_type": "markdown", + "id": "a0d16aca", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Key concepts\n", + "\n", + "**Model documentation**: A structured and detailed record pertaining to a model, encompassing key components such as its underlying assumptions, methodologies, data sources, inputs, performance metrics, evaluations, limitations, and intended uses. It serves to ensure transparency, adherence to regulatory requirements, and a clear understanding of potential risks associated with the model’s application.\n", + "\n", + "**Documentation template**: Functions as a test suite and lays out the structure of model documentation, segmented into various sections and sub-sections. Documentation templates define the structure of your model documentation, specifying the tests that should be run, and how the results should be displayed.\n", + "\n", + "**Tests**: A function contained in the ValidMind Library, designed to run a specific quantitative test on the dataset or model. Tests are the building blocks of ValidMind, used to evaluate and document models and datasets, and can be run individually or as part of a suite defined by your model documentation template.\n", + "\n", + "**Metrics**: A subset of tests that do not have thresholds. In the context of this notebook, metrics and tests can be thought of as interchangeable concepts.\n", + "\n", + "**Custom metrics**: Custom metrics are functions that you define to evaluate your model or dataset. These functions can be registered with the ValidMind Library to be used in the ValidMind Platform.\n", + "\n", + "**Inputs**: Objects to be evaluated and documented in the ValidMind Library. 
They can be any of the following:\n", + "\n", + " - **model**: A single model that has been initialized in ValidMind with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model).\n", + " - **dataset**: Single dataset that has been initialized in ValidMind with [`vm.init_dataset()`](https://docs.validmind.ai/validmind/validmind.html#init_dataset).\n", + " - **models**: A list of ValidMind models - usually this is used when you want to compare multiple models in your custom metric.\n", + " - **datasets**: A list of ValidMind datasets - usually this is used when you want to compare multiple datasets in your custom metric. (Learn more: [Run tests with multiple datasets](https://docs.validmind.ai/notebooks/how_to/run_tests_that_require_multiple_datasets.html))\n", + "\n", + "**Parameters**: Additional arguments that can be passed when running a ValidMind test, used to pass additional information to a metric, customize its behavior, or provide additional context.\n", + "\n", + "**Outputs**: Custom metrics can return elements like tables or plots. Tables may be a list of dictionaries (each representing a row) or a pandas DataFrame. Plots may be matplotlib or plotly figures.\n", + "\n", + "**Test suites**: Collections of tests designed to run together to automate and generate model documentation end-to-end for specific use-cases.\n", + "\n", + "Example: the [`classifier_full_suite`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html#ClassifierFullSuite) test suite runs tests from the [`tabular_dataset`](https://docs.validmind.ai/validmind/validmind/test_suites/tabular_datasets.html) and [`classifier`](https://docs.validmind.ai/validmind/validmind/test_suites/classifier.html) test suites to fully document the data and model sections for binary classification model use-cases." 
+ ] + }, + { + "cell_type": "markdown", + "id": "215d62a7", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Initializing the ValidMind Library\n", + "\n", + "The ValidMind Library provides a rich collection of documentation tools and test suites, from documenting descriptions of datasets to validation and testing of models using a variety of open-source testing frameworks." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Install the ValidMind Library\n", + "\n", + "
Recommended Python versions\n", + "

\n", + "Python 3.8 <= x <= 3.11
\n", + "\n", + "To install the library:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "827eb6bd", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -q validmind" + ] + }, + { + "cell_type": "markdown", + "id": "5e37f9fe", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "ValidMind generates a unique _code snippet_ for each registered model to connect with your developer environment. You initialize the ValidMind Library with this code snippet, which ensures that your documentation and tests are uploaded to the correct model when you run the notebook." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Get your code snippet\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and click **+ Register Model**.\n", + "\n", + "3. Enter the model details and click **Continue**. ([Need more help?](https://docs.validmind.ai/guide/model-inventory/register-models-in-inventory.html))\n", + "\n", + " For example, to register a model for use with this notebook, select:" + ] + }, + { + "cell_type": "markdown", + "id": "48eb92b3", + "metadata": {}, + "source": [ + " - Documentation template: `Binary classification`\n", + " - Use case: `Marketing/Sales - Attrition/Churn Management`" + ] + }, + { + "cell_type": "markdown", + "id": "install-credentials-50e67128-2eb5-470a-aeaf-1c692fd3f847", + "metadata": {}, + "source": [ + " You can fill in other options according to your preference.\n", + " \n", + "4. 
Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a58d951f", + "metadata": {}, + "outputs": [], + "source": [ + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "99cf2df8", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Getting to know ValidMind" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Preview the documentation template\n", + "\n", + "Let's verify that you have connected the ValidMind Library to the ValidMind Platform and that the appropriate *template* is selected for your model. A template predefines sections for your model documentation and provides a general outline to follow, making the documentation process much easier.\n", + "\n", + "You will upload documentation and test results unique to your model based on this template later on. 
For now, **take a look at the default structure that the template provides with [the `vm.preview_template()` function](https://docs.validmind.ai/validmind/validmind.html#preview_template)** from the ValidMind library and note the empty sections:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "819a40bc", + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "id": "cf63d701", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### View model documentation in the ValidMind Platform\n", + "\n", + "Next, let's head to the ValidMind Platform to see the template in action:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Click on the **Documentation** for your model and note how the structure of the documentation matches our preview above." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Explore available tests\n", + "\n", + "Next, let's explore the list of all available tests in the ValidMind Library with [the `vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) — we'll learn how to run tests shortly. \n", + "\n", + "You can see that the documentation template for this model has references to some of the **test `ID`s used to run tests listed below:**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ccc7776", + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Upgrade ValidMind\n", + "\n", + "
After installing ValidMind, you’ll want to periodically make sure you are on the latest version to access any new features and other enhancements.
\n", + "\n", + "Retrieve the information for the currently installed version of ValidMind:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5d3216d", + "metadata": {}, + "outputs": [], + "source": [ + "%pip show validmind" + ] + }, + { + "cell_type": "markdown", + "id": "540efef8", + "metadata": {}, + "source": [ + "If the version returned is lower than the version indicated in our [production open-source code](https://github.com/validmind/validmind-library/blob/prod/validmind/__version__.py), restart your notebook and run:\n", + "\n", + "```bash\n", + "%pip install --upgrade validmind\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "9b8aa1cc", + "metadata": {}, + "source": [ + "You may need to restart your kernel after running the upgrade package for changes to be applied." + ] + }, + { + "cell_type": "markdown", + "id": "65ece5fb", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this first notebook, you learned how to:\n", + "\n", + "- [x] Register a model within the ValidMind Platform\n", + "- [x] Install and initialize the ValidMind Library\n", + "- [x] Preview the documentation template for your model\n", + "- [x] Explore the available tests offered by the ValidMind Library" + ] + }, + { + "cell_type": "markdown", + "id": "a262f940", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Start the model development process\n", + "\n", + "Now that the ValidMind Library is connected to your model in the ValidMind Library with the correct template applied, we can go ahead and start the model development process: **[2 — Start the model development process](2-start_development_process.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", + "version": 
"3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb b/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb new file mode 100644 index 0000000000..e886dc7ff4 --- /dev/null +++ b/site/notebooks/EXECUTED/model_development/2-start_development_process.ipynb @@ -0,0 +1,997 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development 2 — Start the model development process\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our series of four introductory notebooks. In this second notebook, you'll run tests and investigate results, then add the results or evidence to your documentation.\n", + "\n", + "You'll become familiar with the individual tests available in ValidMind, as well as how to run them and change parameters as necessary. Using ValidMind's repository of individual tests as building blocks helps you ensure that a model is being built appropriately. \n", + "\n", + "**For a full list of out-of-the-box tests,** refer to our [Test descriptions](https://docs.validmind.ai/developer/model-testing/test-descriptions.html) or try the interactive [Test sandbox](https://docs.validmind.ai/developer/model-testing/test-sandbox.html)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Identify qualitative tests](#toc2_3_) \n", + " - [Initialize the ValidMind datasets](#toc2_4_) \n", + "- [Running tests](#toc3_) \n", + " - [Run tabular data tests](#toc3_1_) \n", + " - [Utilize test output](#toc3_2_) \n", + "- [Documenting test results](#toc4_) \n", + " - [Run and log multiple tests](#toc4_1_) \n", + " - [Run and log an individual test](#toc4_2_) \n", + " - [Add individual test results to model documentation](#toc4_2_1_) \n", + "- [Model testing](#toc5_) \n", + " - [Train simple logistic regression model](#toc5_1_) \n", + " - [Initialize model evaluation objects](#toc5_2_) \n", + " - [Assign predictions](#toc5_3_) \n", + " - [Run the model evaluation tests](#toc5_4_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Integrate custom tests](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to log test results or evidence to your model documentation with this notebook, you'll need to first have:\n", + "\n", + "- [x] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [x] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first notebook in this series: 1 — Set up the ValidMind Library
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setting up" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "First, let's connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Then, let's import the public [Bank Customer Churn Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle. 
\n", + "\n", + "In our below example, note that: \n", + "\n", + "- The target column, `Exited` has a value of `1` when a customer has churned and `0` otherwise.\n", + "- The ValidMind Library provides a wrapper to automatically load the dataset as a Pandas DataFrame object." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()\n", + "raw_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Identify qualitative tests\n", + "\n", + "Next, let's say we want to do some data quality assessments by running a few individual tests.\n", + "\n", + "Use the [`vm.tests.list_tests()` function](https://docs.validmind.ai/validmind/validmind/tests.html#list_tests) introduced by the first notebook in this series in combination with [`vm.tests.list_tags()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tags) and [`vm.tests.list_tasks()`](https://docs.validmind.ai/validmind/validmind/tests.html#list_tasks) to find which prebuilt tests are relevant for data quality assessment:\n", + "\n", + "- **`tasks`** represent the kind of modeling task associated with a test. Here we'll focus on `classification` tasks.\n", + "- **`tags`** are free-form descriptions providing more details about the test, for example, what category the test falls into. 
Here we'll focus on the `data_quality` tag.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the list of available task types\n", + "sorted(vm.tests.list_tasks())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the list of available tags\n", + "sorted(vm.tests.list_tags())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can pass `tags` and `tasks` as parameters to the `vm.tests.list_tests()` function to filter the tests based on the tags and task types.\n", + "\n", + "For example, to find tests related to tabular data quality for classification models, you can call `list_tests()` like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.list_tests(task=\"classification\", tags=[\"tabular_data\", \"data_quality\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Want to learn more about navigating ValidMind tests?\n", + "

\n", + "Refer to our notebook outlining the utilities available for viewing and understanding available ValidMind tests: Explore tests
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind datasets\n", + "\n", + "With the individual tests we want to run identified, the next step is to connect your data with a ValidMind `Dataset` object. **This step is always necessary every time you want to connect a dataset to documentation and produce test results through ValidMind,** but you only need to do it once per dataset.\n", + "\n", + "Initialize a ValidMind dataset object using the [`init_dataset` function](https://docs.validmind.ai/validmind/validmind.html#init_dataset) from the ValidMind (`vm`) module. For this example, we'll pass in the following arguments:\n", + "\n", + "- **`dataset`** — The raw dataset that you want to provide as input to tests.\n", + "- **`input_id`** — A unique identifier that allows tracking what inputs are used when running each individual test.\n", + "- **`target_column`** — A required argument if tests require access to true values. This is the name of the target column in the dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# vm_raw_dataset is now a VMDataset object that you can pass to any ValidMind test\n", + "vm_raw_dataset = vm.init_dataset(\n", + " dataset=raw_df,\n", + " input_id=\"raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Running tests\n", + "\n", + "Now that we know how to initialize a ValidMind `dataset` object, we're ready to run some tests!\n", + "\n", + "You run individual tests by calling [the `run_test` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test) provided by the `validmind.tests` module. For the examples below, we'll pass in the following arguments:\n", + "\n", + "- **`test_id`** — The ID of the test to run, as seen in the `ID` column when you run `list_tests`. 
\n", + "- **`params`** — A dictionary of parameters for the test. These will override any `default_params` set in the test definition. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run tabular data tests\n", + "\n", + "The inputs expected by a test can also be found in the test definition — let's take [`validmind.data_validation.DescriptiveStatistics`](https://docs.validmind.ai/tests/data_validation/DescriptiveStatistics.html) as an example.\n", + "\n", + "Note that the output of the [`describe_test()` function](https://docs.validmind.ai/validmind/validmind/tests.html#describe_test) below shows that this test expects a `dataset` as input:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.tests.describe_test(\"validmind.data_validation.DescriptiveStatistics\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's run a few tests to assess the quality of the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.DescriptiveStatistics\",\n", + " inputs={\"dataset\": vm_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result2 = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.ClassImbalance\",\n", + " inputs={\"dataset\": vm_raw_dataset},\n", + " params={\"min_percent_threshold\": 30},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output above shows that [the class imbalance test](https://docs.validmind.ai/tests/data_validation/ClassImbalance.html) did not pass according to the value we set for `min_percent_threshold`.\n", + "\n", + "To address this issue, we'll re-run the test on some processed data. 
In this case let's apply a very simple rebalancing technique to the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With this new balanced dataset, you can re-run the individual test to see if it now passes the class imbalance test requirement.\n", + "\n", + "As this is technically a different dataset, **remember to first initialize a new ValidMind `Dataset` object** to pass in as input as required by `run_test()`:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Pass the initialized `balanced_raw_dataset` as input into the test run\n", + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.ClassImbalance\",\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + " params={\"min_percent_threshold\": 30},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Utilize test output\n", + "\n", + 
"You can utilize the output from a ValidMind test for further use, for example, if you want to remove highly correlated features. Removing highly correlated features helps make the model simpler, more stable, and easier to understand.\n", + "\n", + "Below we demonstrate how to retrieve the list of features with the highest correlation coefficients and use them to reduce the final list of features for modeling.\n", + "\n", + "First, we'll run [`validmind.data_validation.HighPearsonCorrelation`](https://docs.validmind.ai/tests/data_validation/HighPearsonCorrelation.html) with the `balanced_raw_dataset` we initialized previously as input as is for comparison with later runs:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output above shows that the test did not pass according to the value we set for `max_threshold`.\n", + "\n", + "`corr_result` is an object of type `TestResult`. 
We can inspect the result object to see what the test has produced:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "print(type(corr_result))\n", + "print(\"Result ID: \", corr_result.result_id)\n", + "print(\"Params: \", corr_result.params)\n", + "print(\"Passed: \", corr_result.passed)\n", + "print(\"Tables: \", corr_result.tables)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's remove the highly correlated features and create a new VM `dataset` object.\n", + "\n", + "We'll begin by checking out the table in the result and extracting a list of features that failed the test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, extract the feature names from the list of strings (example: `(Age, Exited)` > `Age`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, it's time to re-initialize the dataset with the highly correlated features removed.\n", + "\n", + "**Note the use of a different `input_id`.** This allows tracking the inputs used when running each individual test." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Remove the highly correlated features from the dataset\n", + "balanced_raw_no_age_df = balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Re-running the test with the reduced feature set should pass the test:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also plot the correlation matrix to visualize the new correlation between features:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.PearsonCorrelationMatrix\",\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Documenting test results\n", + "\n", + "Now that we've done some analysis on two different datasets, we can use ValidMind to easily document why certain things were done to our raw data with testing to support it.\n", + "\n", + "Every test result returned by the `run_test()` function has a [`.log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestResult.log) that can be used to send the test results to the 
ValidMind Platform:\n", + "\n", + "- When using `run_documentation_tests()`, documentation sections will be automatically populated with the results of all tests registered in the documentation template.\n", + "- When logging individual test results to the platform, you'll need to manually add those results to the desired section of the model documentation.\n", + "\n", + "To demonstrate how to add test results to your model documentation, we'll populate the entire **Data Preparation** section of the documentation using the clean `vm_raw_dataset_preprocessed` dataset as input, and then document an additional individual result for the highly correlated dataset `vm_balanced_raw_dataset`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Run and log multiple tests\n", + "\n", + "[`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests) allows you to run multiple tests at once and automatically log the results to your documentation. Below, we'll run the tests using the previously initialized `vm_raw_dataset_preprocessed` as input — this will populate the entire **Data Preparation** section for every test that is part of the documentation template.\n", + "\n", + "For this example, we'll pass in the following arguments:\n", + "\n", + "- **`inputs`:** Any inputs to be passed to the tests.\n", + "- **`config`:** A dictionary `:` that allows configuring each test individually. Each test config requires the following:\n", + " - **`params`:** Individual test parameters.\n", + " - **`inputs`:** Individual test inputs. 
This overrides any inputs passed from the `run_documentation_tests()` function.\n", + "\n", + "When including explicit configuration for individual tests, you'll need to specify the `inputs` even if they mirror what is included in your global configuration.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Individual test config with inputs specified\n", + "test_config = {\n", + " \"validmind.data_validation.ClassImbalance\": {\n", + " \"params\": {\"min_percent_threshold\": 30},\n", + " \"inputs\": {\"dataset\": vm_raw_dataset_preprocessed},\n", + " },\n", + " \"validmind.data_validation.HighPearsonCorrelation\": {\n", + " \"params\": {\"max_threshold\": 0.3},\n", + " \"inputs\": {\"dataset\": vm_raw_dataset_preprocessed},\n", + " },\n", + "}\n", + "\n", + "# Global test config\n", + "tests_suite = vm.run_documentation_tests(\n", + " inputs={\n", + " \"dataset\": vm_raw_dataset_preprocessed,\n", + " },\n", + " config=test_config,\n", + " section=[\"data_preparation\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run and log an individual test\n", + "\n", + "Next, we'll use the previously initialized `vm_balanced_raw_dataset` (that still has a highly correlated `Age` column) as input to run an individual test, then log the result to the ValidMind Platform.\n", + "\n", + "When running individual tests, **you can use a custom `result_id` to tag the individual result with a unique identifier:** \n", + "\n", + "- This `result_id` can be appended to `test_id` with a `:` separator.\n", + "- The `balanced_raw_dataset` result identifier will correspond to the `balanced_raw_dataset` input, the dataset that still has the `Age` column." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation:balanced_raw_dataset\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Add individual test results to model documentation\n", + "\n", + "With the test results logged, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation ([Need more help?](https://docs.validmind.ai/developer/model-documentation/work-with-test-results.html)):\n", + "\n", + "1. From the **Inventory** in the ValidMind Platform, go to the model you connected to earlier.\n", + "\n", + "2. In the left sidebar that appears for your model, click **Documentation**.\n", + "\n", + "3. Locate the Data Preparation section and click on **2.3. Correlations and Interactions** to expand that section.\n", + "\n", + "4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block.\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "5. Click **+** and then select **Test-Driven Block**:\n", + "\n", + " - In the search bar, type in `HighPearsonCorrelation`.\n", + " - Select `HighPearsonCorrelation:balanced_raw_dataset` as the test.\n", + "\n", + " A preview of the test gets shown:\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "6. Finally, click **Insert 1 Test Result to Document** to add the test result to the documentation.\n", + "\n", + " Confirm that the individual results for the high correlation test has been correctly inserted into section **2.3. Correlations and Interactions** of the documentation.\n", + "\n", + "7. Finalize the documentation by editing the test result's description block to explain the changes you made to the raw data and the reasons behind them as shown in the screenshot below:\n", + "\n", + " \"Screenshot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Model testing\n", + "\n", + "So far, we've focused on the data assessment and pre-processing that usually occurs prior to any models being built. Now, let's instead assume we have already built a model and we want to incorporate some model results into our documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train simple logistic regression model\n", + "\n", + "Using ValidMind tests, we'll train a simple logistic regression model on our dataset and evaluate its performance by using the `LogisticRegression` class from the `sklearn.linear_model`.\n", + "\n", + "To start, let's grab the first few rows from the `balanced_raw_no_age_df` dataset with the highly correlated features removed we initialized earlier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before training the model, we need to encode the categorical features in the dataset:\n", + "\n", + "- Use the `OneHotEncoder` class from the `sklearn.preprocessing` module to encode the categorical features.\n", + "- The categorical features in the dataset are `Geography` and `Gender`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll split our preprocessed dataset into training and testing, to help assess how well the model generalizes to unseen data:\n", + "\n", + "- We start by dividing our `balanced_raw_no_age_df` dataset into training and test subsets using `train_test_split`, with 80% of the data allocated to training (`train_df`) and 20% to testing (`test_df`).\n", + "- From each subset, we separate the features (all columns except \"Exited\") into `X_train` and `X_test`, and the target column (\"Exited\") into `y_train` and `y_test`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.model_selection import train_test_split\n", + "\n", + "train_df, test_df = train_test_split(balanced_raw_no_age_df, test_size=0.20)\n", + "\n", + "X_train = train_df.drop(\"Exited\", axis=1)\n", + "y_train = train_df[\"Exited\"]\n", + "X_test = test_df.drop(\"Exited\", axis=1)\n", + "y_test = test_df[\"Exited\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then using `GridSearchCV`, we'll find the best-performing hyperparameters or settings and save them:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "\n", + "# Logistic Regression grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", \"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " \"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection 
import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize model evaluation objects\n", + "\n", + "The last step for evaluating the model's performance is to initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the datasets into their own dataset objects\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You'll also need to initialize a ValidMind model object (`vm_model`) that can be passed to other functions for analysis and tests on the data for each of our three models.\n", + "\n", + "You simply initialize this model object with [`vm.init_model()`](https://docs.validmind.ai/validmind/validmind.html#init_model):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register the model\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Assign predictions\n", + "\n", + "Once the model has been registered you can assign model predictions to the training and test datasets.\n", + "\n", + "- The [`assign_predictions()` 
method](https://docs.validmind.ai/validmind/validmind/vm_models.html#assign_predictions) from the `Dataset` object can link existing predictions to any number of models.\n", + "- This method links the model's class prediction values and probabilities to our `vm_train_ds` and `vm_test_ds` datasets.\n", + "\n", + "If no prediction values are passed, the method will compute predictions automatically:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Run the model evaluation tests\n", + "\n", + "In this next example, we'll focus on running the tests within the Model Development section of the model documentation. Only tests associated with this section will be executed, and the corresponding results will be updated in the model documentation.\n", + "\n", + "- Note the additional config that is passed to `run_documentation_tests()` — this allows you to override `inputs` or `params` in certain tests.\n", + "- In our case, we want to explicitly use the `vm_train_ds` for the [`validmind.model_validation.sklearn.ClassifierPerformance:in_sample` test](https://docs.validmind.ai/tests/model_validation/sklearn/ClassifierPerformance.html), since it's supposed to run on the training dataset and not the test dataset.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_config = {\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\n", + " \"dataset\": vm_train_ds,\n", + " \"model\": vm_model,\n", + " },\n", + " }\n", + "}\n", + "results = vm.run_documentation_tests(\n", + " section=[\"model_development\"],\n", + " inputs={\n", + " \"dataset\": vm_test_ds, # Any test that requires a single dataset will 
use vm_test_ds\n", + " \"model\": vm_model,\n", + " \"datasets\": (\n", + " vm_train_ds,\n", + " vm_test_ds,\n", + " ), # Any test that requires multiple datasets will use vm_train_ds and vm_test_ds\n", + " },\n", + " config=test_config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this second notebook, you learned how to:\n", + "\n", + "- [x] Import a sample dataset\n", + "- [x] Identify which tests you might want to run with ValidMind\n", + "- [x] Initialize ValidMind datasets\n", + "- [x] Run individual tests\n", + "- [x] Utilize the output from tests you've run\n", + "- [x] Log test results from sets of or individual tests as evidence to the ValidMind Platform\n", + "- [x] Add supplementary individual test results to your documentation\n", + "- [x] Assign model predictions to your ValidMind model objects\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Integrate custom tests\n", + "\n", + "Now that you're familiar with the basics of using the ValidMind Library to run and log tests to provide evidence for your model documentation, let's learn how to incorporate your own custom tests into ValidMind: **[3 — Integrate custom tests](3-integrate_custom_tests.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb b/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb new file mode 100644 index 0000000000..038f45c38a --- /dev/null +++ b/site/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb @@ -0,0 +1,987 
@@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development 3 — Integrate custom tests\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our series of four introductory notebooks. In this third notebook, supplement ValidMind tests with your own and include them as additional evidence in your documentation.\n", + "\n", + "This notebook assumes that you already have a repository of custom made tests considered critical to include in your documentation. A custom test is any function that takes a set of inputs and parameters as arguments and returns one or more outputs:\n", + "\n", + "- The function can be as simple or as complex as you need it to be — it can use external libraries, make API calls, or do anything else that you can do in Python.\n", + "- The only requirement is that the function signature and return values can be \"understood\" and handled by the ValidMind Library. As such, custom tests offer added flexibility by extending the default tests provided by ValidMind, enabling you to document any type of model or use case.\n", + "\n", + "**For a more in-depth introduction to custom tests,** refer to our [Implement custom tests](../../code_samples/custom_tests/implement_custom_tests.ipynb) notebook." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Remove highly correlated features](#toc2_2_1_) \n", + " - [Train the model](#toc2_3_) \n", + " - [Initialize the ValidMind objects](#toc2_3_1_) \n", + " - [Assign predictions](#toc2_3_2_) \n", + "- [Implementing a custom inline test](#toc3_) \n", + " - [Create a confusion matrix plot](#toc3_1_) \n", + " - [Add parameters to custom tests](#toc3_2_) \n", + " - [Pass parameters to custom tests](#toc3_3_) \n", + " - [Log the confusion matrix results](#toc3_4_) \n", + "- [Using external test providers](#toc4_) \n", + " - [Create custom tests folder](#toc4_1_) \n", + " - [Save an inline test](#toc4_2_) \n", + " - [Register a local test provider](#toc4_3_) \n", + " - [Initialize a local test provider](#toc4_3_1_) \n", + " - [Run test provider tests](#toc4_3_2_) \n", + "- [Add test results to documentation](#toc5_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Finalize testing and documentation](#toc7_1_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to integrate custom tests with your model documentation with this notebook, you'll need to first have:\n", + "\n", + "- [x] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [x] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "- [x] Learned how to import and initialize datasets for use with ValidMind\n", + "- [x] Understood the basics of how to run and log tests with ValidMind\n", + "- [x] Inserted a test-driven block for the results of your 
`HighPearsonCorrelation:balanced_raw_dataset` test into your model's documentation\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first two notebooks in this series:\n", + "\n", + "- 1 — Set up the ValidMind Library\n", + "- 2 — Start the model development process\n", + "\n", + "
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up\n", + "\n", + "This section should be quite familiar to you — as we performed the same actions in the previous notebook, **[2 — Start the model development process](2-start_development_process.ipynb)**." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "As usual, let's first connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Next, we'll import the same public [Bank Customer Churn 
Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle we used in the last notebook so that we have something to work with:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll apply a simple rebalancing technique to the dataset before continuing:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Remove highly correlated features\n", + "\n", + "Let's also quickly remove highly correlated features from the dataset using the output from a ValidMind test.\n", + "\n", + "As you learned previously, before we can run tests you'll need to initialize a ValidMind dataset object:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = 
vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our balanced dataset initialized, we can then run our test and utilize the output to help us identify the features we want to remove:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run HighPearsonCorrelation test with our balanced dataset as input and return a result object\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# From result object, extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract feature names from the list of strings\n", + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then re-initialize the dataset with a different `input_id` and the highly correlated features removed and re-run the test for confirmation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Remove the 
highly correlated features from the dataset\n", + "balanced_raw_no_age_df = balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Re-run the test with the reduced feature set\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train the model\n", + "\n", + "We'll then use ValidMind tests to train a simple logistic regression model on our prepared dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First encode the categorical features in our dataset with the highly correlated features removed\n", + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Split the processed dataset into train and test\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "train_df, test_df = train_test_split(balanced_raw_no_age_df, test_size=0.20)\n", + "\n", + "X_train = train_df.drop(\"Exited\", axis=1)\n", + "y_train = train_df[\"Exited\"]\n", + "X_test = test_df.drop(\"Exited\", axis=1)\n", + "y_test = test_df[\"Exited\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from 
sklearn.linear_model import LogisticRegression\n", + "\n", + "# Logistic Regression grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", \"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " \"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind objects\n", + "\n", + "Let's initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the datasets into their own dataset objects\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "# Initialize a model object\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Assign predictions\n", + "\n", + "Once the model is registered, we'll assign predictions to the training and test datasets:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Implementing a 
custom inline test\n", + "\n", + "With the set up out of the way, let's implement a custom *inline test* that calculates the confusion matrix for a binary classification model.\n", + "\n", + "- An inline test refers to a test written and executed within the same environment as the code being tested — in this case, right in this Jupyter Notebook — without requiring a separate test file or framework.\n", + "- You'll note that the custom test function is just a regular Python function that can include and require any Python library as you see fit." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Create a confusion matrix plot\n", + "\n", + "Let's first create a confusion matrix plot using the `confusion_matrix` function from the `sklearn.metrics` module:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "\n", + "# Get the predicted classes\n", + "y_pred = log_reg.predict(vm_test_ds.x)\n", + "\n", + "confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n", + "\n", + "cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + ")\n", + "cm_display.plot()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, create a [`@vm.test` wrapper](https://docs.validmind.ai/validmind/validmind.html#test) that will allow you to create a reusable test. **Note the following changes in the code below:**\n", + "\n", + "- The function `confusion_matrix` takes two arguments `dataset` and `model`. 
This is a `VMDataset` and `VMModel` object respectively.\n", + " - `VMDataset` objects allow you to access the dataset's true (target) values by accessing the `.y` attribute.\n", + " - `VMDataset` objects allow you to access the predictions for a given model by accessing the `.y_pred()` method.\n", + "- The function docstring provides a description of what the test does. This will be displayed along with the result in this notebook as well as in the ValidMind Platform.\n", + "- The function body calculates the confusion matrix using the `sklearn.metrics.confusion_matrix` function as we just did above.\n", + "- The function then returns the `ConfusionMatrixDisplay.figure_` object — this is important as the ValidMind Library expects the output of the custom test to be a plot or a table.\n", + "- The `@vm.test` decorator is doing the work of creating a wrapper around the function that will allow it to be run by the ValidMind Library. It also registers the test so it can be found by the ID `my_custom_tests.ConfusionMatrix`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can now run the newly created custom test on both the training and test datasets using the [`run_test()` function](https://docs.validmind.ai/validmind/validmind/tests.html#run_test):\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "# Training dataset\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:training_dataset\",\n", + " 
inputs={\"model\": vm_model, \"dataset\": vm_train_ds},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Add parameters to custom tests\n", + "\n", + "Custom tests can take parameters just like any other function. To demonstrate, let's modify the `confusion_matrix` function to take an additional parameter `normalize` that will allow you to normalize the confusion matrix:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model, normalize=False):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " if normalize:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred, normalize=\"all\")\n", + " else:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + 
"\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Pass parameters to custom tests\n", + "\n", + "You can pass parameters to custom tests by providing a dictionary of parameters to the `run_test()` function.\n", + "\n", + "- The parameters will override any default parameters set in the custom test definition. Note that `dataset` and `model` are still passed as `inputs`.\n", + "- Since these are `VMDataset` or `VMModel` inputs, they have a special meaning.\n", + "- When declaring a `dataset`, `model`, `datasets` or `models` argument in a custom test function, the ValidMind Library will expect these get passed as `inputs` to `run_test()` or `run_documentation_tests()`.\n", + "\n", + "Re-running the confusion matrix with `normalize=True` and our testing dataset looks like this:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset with normalize=True\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True}\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Log the confusion matrix results\n", + "\n", + "As we learned in **[2 — Start the model development process](2-start_development_process.ipynb)** under **Documenting results** > **Run and log an individual tests**, you can log any result to the ValidMind Platform with the [`.log()` method](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestResult.log) of the result object, allowing 
you to then add the result to the documentation.\n", + "\n", + "You can now do the same for the confusion matrix results:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Using external test providers\n", + "\n", + "Creating inline custom tests with a function is a great way to customize your model documentation. However, sometimes you may want to reuse the same set of tests across multiple models and share them with others in your organization. In this case, you can create an external custom *test provider* that will allow you to load custom tests from a local folder or a Git repository.\n", + "\n", + "In this section you will learn how to declare a local filesystem test provider that allows loading tests from a local folder following these high level steps:\n", + "\n", + "1. Create a folder of custom tests from existing inline tests (tests that exist in your active Jupyter Notebook)\n", + "2. Save an inline test to a file\n", + "3. Define and register a [`LocalTestProvider`](https://docs.validmind.ai/validmind/validmind/tests.html#LocalTestProvider) that points to that folder\n", + "4. Run test provider tests\n", + "5. 
Add the test results to your documentation\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Create custom tests folder\n", + "\n", + "Let's start by creating a new folder that will contain reusable custom tests from your existing inline tests.\n", + "\n", + "The following code snippet will create a new `my_tests` directory in the current working directory if it doesn't exist:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "tests_folder = \"my_tests\"\n", + "\n", + "import os\n", + "\n", + "# create tests folder\n", + "os.makedirs(tests_folder, exist_ok=True)\n", + "\n", + "# remove existing tests\n", + "for f in os.listdir(tests_folder):\n", + " # remove files and pycache\n", + " if f.endswith(\".py\") or f == \"__pycache__\":\n", + " os.system(f\"rm -rf {tests_folder}/{f}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After running the command above, confirm that a new `my_tests` directory was created successfully. For example:\n", + "\n", + "```\n", + "~/notebooks/tutorials/model_development/my_tests/\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Save an inline test\n", + "\n", + "The `@vm.test` decorator we used in **Implementing a custom inline test** above to register one-off custom tests also includes a convenience method on the function object that allows you to simply call `.save()` to save the test to a Python file at a specified path.\n", + "\n", + "While `save()` will get you started by creating the file and saving the function code with the correct name, it won't automatically include any imports, or other functions or variables, outside of the functions that are needed for the test to run. 
To solve this, pass in an optional `imports` argument ensuring necessary imports are added to the file.\n", + "\n", + "The `confusion_matrix` test requires the following additional imports:\n", + "\n", + "```python\n", + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "```\n", + "\n", + "Let's pass these imports to the `save()` method to ensure they are included in the file with the following command:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "confusion_matrix.save(\n", + " # Save it to the custom tests folder we created\n", + " tests_folder,\n", + " imports=[\"import matplotlib.pyplot as plt\", \"from sklearn import metrics\"],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "- [x] Confirm that the `save()` method saved the `confusion_matrix` function to a file named `ConfusionMatrix.py` in the `my_tests` folder.\n", + "- [x] Note that the new file provides some context on the origin of the test, which is useful for traceability:\n", + "\n", + " ```\n", + " # Saved from __main__.confusion_matrix\n", + " # Original Test ID: my_custom_tests.ConfusionMatrix\n", + " # New Test ID: .ConfusionMatrix\n", + " ```\n", + "\n", + "- [x] Additionally, the new test function has been stripped off its decorator, as it now resides in a file that will be loaded by the test provider:\n", + "\n", + " ```python\n", + " def ConfusionMatrix(dataset, model, normalize=False):\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Register a local test provider\n", + "\n", + "Now that your `my_tests` folder has a sample custom test, let's initialize a test provider that will tell the ValidMind Library where to find your custom tests:\n", + "\n", + "- ValidMind offers out-of-the-box test providers for local tests (tests in a folder) or a Github provider for tests in a Github 
repository.\n", + "- You can also create your own test provider by creating a class that has a [`load_test` method](https://docs.validmind.ai/validmind/validmind/tests.html#load_test) that takes a test ID and returns the test function matching that ID.\n", + "\n", + "
Want to learn more about test providers?\n", + "

\n", + "An extended introduction to test providers can be found in: Integrate external test providers
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize a local test provider\n", + "\n", + "For most use cases, using a `LocalTestProvider` that allows you to load custom tests from a designated directory should be sufficient.\n", + "\n", + "**The most important attribute for a test provider is its `namespace`.** This is a string that will be used to prefix test IDs in model documentation. This allows you to have multiple test providers with tests that can even share the same ID, but are distinguished by their namespace.\n", + "\n", + "Let's go ahead and load the custom tests from our `my_tests` directory:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.tests import LocalTestProvider\n", + "\n", + "# initialize the test provider with the tests folder we created earlier\n", + "my_test_provider = LocalTestProvider(tests_folder)\n", + "\n", + "vm.tests.register_test_provider(\n", + " namespace=\"my_test_provider\",\n", + " test_provider=my_test_provider,\n", + ")\n", + "# `my_test_provider.load_test()` will be called for any test ID that starts with `my_test_provider`\n", + "# e.g. `my_test_provider.ConfusionMatrix` will look for a function named `ConfusionMatrix` in `my_tests/ConfusionMatrix.py` file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Run test provider tests\n", + "\n", + "Now that we've set up the test provider, we can run any test that's located in the tests folder by using the `run_test()` method as with any other test:\n", + "\n", + "- For tests that reside in a test provider directory, the test ID will be the `namespace` specified when registering the provider, followed by the path to the test file relative to the tests folder.\n", + "- For example, the Confusion Matrix test we created earlier will have the test ID `my_test_provider.ConfusionMatrix`. 
You could organize the tests in subfolders, say `classification` and `regression`, and the test ID for the Confusion Matrix test would then be `my_test_provider.classification.ConfusionMatrix`.\n", + "\n", + "Let's go ahead and re-run the confusion matrix test with our testing dataset by using the test ID `my_test_provider.ConfusionMatrix`. This should load the test from the test provider and run it as before.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "result = vm.tests.run_test(\n", + " \"my_test_provider.ConfusionMatrix\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True},\n", + ")\n", + "\n", + "result.log()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
Again, note the output returned indicating that a test-driven block doesn't currently exist in your model's documentation for this particular test ID. \n", + "

\n", + "That's expected, as when we run individual tests the results logged need to be manually added to your documentation within the ValidMind Platform.
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Add test results to documentation\n", + "\n", + "With our custom tests run and results logged to the ValidMind Platform, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation ([Need more help?](https://docs.validmind.ai/developer/model-documentation/work-with-test-results.html)):\n", + "\n", + "1. From the **Inventory** in the ValidMind Platform, go to the model you connected to earlier.\n", + "\n", + "2. In the left sidebar that appears for your model, click **Documentation**.\n", + "\n", + "3. Locate the Data Preparation section and click on **3.2. Model Evaluation** to expand that section.\n", + "\n", + "4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block.\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "5. Click **+** and then select **Test-Driven Block**:\n", + "\n", + " - In the search bar, type in `ConfusionMatrix`.\n", + " - Select the custom `ConfusionMatrix` tests you logged above:\n", + "\n", + " \"Screenshot\n", + "

\n", + "\n", + "6. Finally, click **Insert 2 Test Results to Document** to add the test results to the documentation.\n", + "\n", + " Confirm that the two individual results for the confusion matrix tests have been correctly inserted into section **3.2. Model Evaluation** of the documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this third notebook, you learned how to:\n", + "\n", + "- [x] Implement a custom inline test\n", + "- [x] Run and log your custom inline tests\n", + "- [x] Use external custom test providers\n", + "- [x] Run and log tests from your custom test providers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Finalize testing and documentation\n", + "\n", + "Now that you're proficient at using the ValidMind Library to run and log tests, let's put the last pieces in place to prepare our fully documented sample model for review: **[4 — Finalize testing and documentation](4-finalize_testing_documentation.ipynb)**" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb b/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb new file mode 100644 index 0000000000..13a4f1e148 --- /dev/null +++ b/site/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.ipynb @@ -0,0 +1,966 @@ +{ + "cells": [ + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "# ValidMind for model development 4 — Finalize testing and documentation\n", + "\n", + "Learn how to use ValidMind for your end-to-end model documentation process with our introductory notebook series. In this last notebook, finalize the testing and documentation of your model and have a fully documented sample model ready for review.\n", + "\n", + "We'll first use [`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests) previously covered in **[2 — Start the model development process](2-start_development_process.ipynb)** to ensure that your custom test results generated in **[3 — Integrate custom tests](3-integrate_custom_tests.ipynb)** are included in your documentation. Then, we'll view and update the configuration for the entire model documentation template to suit your needs.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "::: {.content-hidden when-format=\"html\"}\n", + "## Contents \n", + "- [Prerequisites](#toc1_) \n", + "- [Setting up](#toc2_) \n", + " - [Initialize the ValidMind Library](#toc2_1_) \n", + " - [Import sample dataset](#toc2_2_) \n", + " - [Remove highly correlated features](#toc2_2_1_) \n", + " - [Train the model](#toc2_3_) \n", + " - [Initialize the ValidMind objects](#toc2_3_1_) \n", + " - [Assign predictions](#toc2_3_2_) \n", + " - [Add custom tests](#toc2_4_) \n", + " - [Implement custom inline test](#toc2_4_1_) \n", + " - [Add a local test provider](#toc2_4_2_) \n", + "- [Reconnect to ValidMind](#toc3_) \n", + "- [Include custom test results](#toc4_) \n", + "- [Documentation template configuration](#toc5_) \n", + " - [Update the config](#toc5_1_) \n", + "- [In summary](#toc6_) \n", + "- [Next steps](#toc7_) \n", + " - [Work with your model documentation](#toc7_1_) \n", + " - [Learn more](#toc7_2_) \n", + " - [Use cases](#toc7_2_1_) \n", + " - [More how-to guides and code samples](#toc7_2_2_) \n", + " 
- [Discover more learning resources](#toc7_2_3_) \n", + "\n", + ":::\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Prerequisites\n", + "\n", + "In order to finalize the testing and documentation for your sample model, you'll need to first have:\n", + "\n", + "- [x] Registered a model within the ValidMind Platform with a predefined documentation template\n", + "- [x] Installed the ValidMind Library in your local environment, allowing you to access all its features\n", + "- [x] Learned how to import and initialize datasets for use with ValidMind\n", + "- [x] Learned how to run and log default and custom tests with ValidMind, including from external test providers\n", + "- [x] Inserted test-driven blocks for the results of the following tests into your model's documentation:\n", + " - [x] `HighPearsonCorrelation:balanced_raw_dataset`\n", + " - [x] `my_test_provider.ConfusionMatrix`\n", + " - [x] `my_custom_tests.ConfusionMatrix:test_dataset_normalized`\n", + "\n", + "
Need help with the above steps?\n", + "

\n", + "Refer to the first three notebooks in this series:\n", + "\n", + "- 1 — Set up the ValidMind Library\n", + "- 2 — Start the model development process\n", + "- 3 — Integrate custom tests\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Setting up\n", + "\n", + "This section should be very familiar to you now — as we performed the same actions in the previous two notebooks in this series." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Initialize the ValidMind Library\n", + "\n", + "As usual, let's first connect up the ValidMind Library to our model we previously registered in the ValidMind Platform:\n", + "\n", + "1. In a browser, [log in to ValidMind](https://docs.validmind.ai/guide/configuration/log-in-to-validmind.html).\n", + "\n", + "2. In the left sidebar, navigate to **Inventory** and select the model you registered for this \"ValidMind for model development\" series of notebooks.\n", + "\n", + "3. Go to **Getting Started** and click **Copy snippet to clipboard**.\n", + "\n", + "Next, [load your model identifier credentials from an `.env` file](https://docs.validmind.ai/developer/model-documentation/store-credentials-in-env-file.html) or replace the placeholder with your own code snippet:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure the ValidMind Library is installed\n", + "\n", + "%pip install -q validmind\n", + "\n", + "# Load your model identifier credentials from an `.env` file\n", + "\n", + "%load_ext dotenv\n", + "%dotenv .env\n", + "\n", + "# Or replace with your code snippet\n", + "\n", + "import validmind as vm\n", + "\n", + "vm.init(\n", + " # api_host=\"...\",\n", + " # api_key=\"...\",\n", + " # api_secret=\"...\",\n", + " # model=\"...\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Import sample dataset\n", + "\n", + "Next, we'll import the same public [Bank Customer Churn Prediction](https://www.kaggle.com/datasets/shantanudhakadd/bank-customer-churn-prediction) dataset from Kaggle we used in 
the last notebooks so that we have something to work with:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn as demo_dataset\n", + "\n", + "print(\n", + " f\"Loaded demo dataset with: \\n\\n\\t• Target column: '{demo_dataset.target_column}' \\n\\t• Class labels: {demo_dataset.class_labels}\"\n", + ")\n", + "\n", + "raw_df = demo_dataset.load_data()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll apply a simple rebalancing technique to the dataset before continuing:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "raw_copy_df = raw_df.sample(frac=1) # Create a copy of the raw dataset\n", + "\n", + "# Create a balanced dataset with the same number of exited and not exited customers\n", + "exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 1]\n", + "not_exited_df = raw_copy_df.loc[raw_copy_df[\"Exited\"] == 0].sample(n=exited_df.shape[0])\n", + "\n", + "balanced_raw_df = pd.concat([exited_df, not_exited_df])\n", + "balanced_raw_df = balanced_raw_df.sample(frac=1, random_state=42)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Remove highly correlated features\n", + "\n", + "Let's also quickly remove highly correlated features from the dataset using the output from a ValidMind test.\n", + "\n", + "As you learned previously, before we can run tests you'll need to initialize a ValidMind dataset object:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register new data and now 'balanced_raw_dataset' is the new dataset object of interest\n", + "vm_balanced_raw_dataset = vm.init_dataset(\n", + " dataset=balanced_raw_df,\n", + " input_id=\"balanced_raw_dataset\",\n", + " 
target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "With our balanced dataset initialized, we can then run our test and utilize the output to help us identify the features we want to remove:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run HighPearsonCorrelation test with our balanced dataset as input and return a result object\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_balanced_raw_dataset},\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# From result object, extract table from `corr_result.tables`\n", + "features_df = corr_result.tables[0].data\n", + "features_df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract list of features that failed the test\n", + "high_correlation_features = features_df[features_df[\"Pass/Fail\"] == \"Fail\"][\"Columns\"].tolist()\n", + "high_correlation_features" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract feature names from the list of strings\n", + "high_correlation_features = [feature.split(\",\")[0].strip(\"()\") for feature in high_correlation_features]\n", + "high_correlation_features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can then re-initialize the dataset with a different `input_id` and the highly correlated features removed and re-run the test for confirmation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Remove the highly correlated features from the dataset\n", + "balanced_raw_no_age_df = 
balanced_raw_df.drop(columns=high_correlation_features)\n", + "\n", + "# Re-initialize the dataset object\n", + "vm_raw_dataset_preprocessed = vm.init_dataset(\n", + " dataset=balanced_raw_no_age_df,\n", + " input_id=\"raw_dataset_preprocessed\",\n", + " target_column=\"Exited\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Re-run the test with the reduced feature set\n", + "corr_result = vm.tests.run_test(\n", + " test_id=\"validmind.data_validation.HighPearsonCorrelation\",\n", + " params={\"max_threshold\": 0.3},\n", + " inputs={\"dataset\": vm_raw_dataset_preprocessed},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Train the model\n", + "\n", + "We'll then use ValidMind tests to train a simple logistic regression model on our prepared dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First encode the categorical features in our dataset with the highly correlated features removed\n", + "balanced_raw_no_age_df = pd.get_dummies(\n", + " balanced_raw_no_age_df, columns=[\"Geography\", \"Gender\"], drop_first=True\n", + ")\n", + "balanced_raw_no_age_df.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Split the processed dataset into train and test\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "train_df, test_df = train_test_split(balanced_raw_no_age_df, test_size=0.20)\n", + "\n", + "X_train = train_df.drop(\"Exited\", axis=1)\n", + "y_train = train_df[\"Exited\"]\n", + "X_test = test_df.drop(\"Exited\", axis=1)\n", + "y_test = test_df[\"Exited\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "\n", + "# Logistic Regression 
grid params\n", + "log_reg_params = {\n", + " \"penalty\": [\"l1\", \"l2\"],\n", + " \"C\": [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n", + " \"solver\": [\"liblinear\"],\n", + "}\n", + "\n", + "# Grid search for Logistic Regression\n", + "from sklearn.model_selection import GridSearchCV\n", + "\n", + "grid_log_reg = GridSearchCV(LogisticRegression(), log_reg_params)\n", + "grid_log_reg.fit(X_train, y_train)\n", + "\n", + "# Logistic Regression best estimator\n", + "log_reg = grid_log_reg.best_estimator_" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Initialize the ValidMind objects\n", + "\n", + "Let's initialize the ValidMind `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize the datasets into their own dataset objects\n", + "vm_train_ds = vm.init_dataset(\n", + " input_id=\"train_dataset_final\",\n", + " dataset=train_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "vm_test_ds = vm.init_dataset(\n", + " input_id=\"test_dataset_final\",\n", + " dataset=test_df,\n", + " target_column=\"Exited\",\n", + ")\n", + "\n", + "# Initialize a model object\n", + "vm_model = vm.init_model(log_reg, input_id=\"log_reg_model_v1\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Assign predictions\n", + "\n", + "Once the model is registered, we'll assign predictions to the training and test datasets:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm_train_ds.assign_predictions(model=vm_model)\n", + "vm_test_ds.assign_predictions(model=vm_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Add custom tests\n", + "\n", + "We'll also add the same custom tests we implemented in the previous 
notebook so that this session has access to the same custom inline test and local test provider." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Implement custom inline test\n", + "\n", + "Let's set up a custom inline test that calculates the confusion matrix for a binary classification model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# First create a confusion matrix plot\n", + "import matplotlib.pyplot as plt\n", + "from sklearn import metrics\n", + "\n", + "# Get the predicted classes\n", + "y_pred = log_reg.predict(vm_test_ds.x)\n", + "\n", + "confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n", + "\n", + "cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + ")\n", + "cm_display.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create the reusable ConfusionMatrix inline test with normalized matrix\n", + "@vm.test(\"my_custom_tests.ConfusionMatrix\")\n", + "def confusion_matrix(dataset, model, normalize=False):\n", + " \"\"\"The confusion matrix is a table that is often used to describe the performance of a classification model on a set of data for which the true values are known.\n", + "\n", + " The confusion matrix is a 2x2 table that contains 4 values:\n", + "\n", + " - True Positive (TP): the number of correct positive predictions\n", + " - True Negative (TN): the number of correct negative predictions\n", + " - False Positive (FP): the number of incorrect positive predictions\n", + " - False Negative (FN): the number of incorrect negative predictions\n", + "\n", + " The confusion matrix can be used to assess the holistic performance of a classification model by showing the accuracy, precision, recall, and F1 score of the model on a single figure.\n", + " \"\"\"\n", + " y_true = 
dataset.y\n", + " y_pred = dataset.y_pred(model=model)\n", + "\n", + " if normalize:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred, normalize=\"all\")\n", + " else:\n", + " confusion_matrix = metrics.confusion_matrix(y_true, y_pred)\n", + "\n", + " cm_display = metrics.ConfusionMatrixDisplay(\n", + " confusion_matrix=confusion_matrix, display_labels=[False, True]\n", + " )\n", + " cm_display.plot()\n", + "\n", + " plt.close() # close the plot to avoid displaying it\n", + "\n", + " return cm_display.figure_ # return the figure object itself" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Test dataset with normalize=True\n", + "result = vm.tests.run_test(\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\",\n", + " inputs={\"model\": vm_model, \"dataset\": vm_test_ds},\n", + " params={\"normalize\": True},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Add a local test provider\n", + "\n", + "Finally, let's save our custom inline test to our local test provider:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create custom tests folder\n", + "tests_folder = \"my_tests\"\n", + "\n", + "import os\n", + "\n", + "# create tests folder\n", + "os.makedirs(tests_folder, exist_ok=True)\n", + "\n", + "# remove existing tests\n", + "for f in os.listdir(tests_folder):\n", + " # remove files and pycache\n", + " if f.endswith(\".py\") or f == \"__pycache__\":\n", + " os.system(f\"rm -rf {tests_folder}/{f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save custom inline test to custom tests folder\n", + "confusion_matrix.save(\n", + " tests_folder,\n", + " imports=[\"import matplotlib.pyplot as plt\", \"from sklearn import metrics\"],\n", + ")" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Register local test provider\n", + "from validmind.tests import LocalTestProvider\n", + "\n", + "# initialize the test provider with the tests folder we created earlier\n", + "my_test_provider = LocalTestProvider(tests_folder)\n", + "\n", + "vm.tests.register_test_provider(\n", + " namespace=\"my_test_provider\",\n", + " test_provider=my_test_provider,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Reconnect to ValidMind\n", + "\n", + "After you insert test-driven blocks into your model documentation, changes should persist and become available every time you call [`vm.preview_template()`](https://docs.validmind.ai/validmind/validmind.html#preview_template).\n", + "\n", + "However, you'll need to reload the connection to the ValidMind Platform if you have added test-driven blocks when the connection was already established using [`reload()`](https://docs.validmind.ai/validmind/validmind.html#reload):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.reload()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, when you run `preview_template()` again, the three test-driven blocks you added to your documentation in the last two notebooks in should show up in the template in sections **2.3 Correlations and Interactions** and **3.2 Model Evaluation**:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vm.preview_template()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Include custom test results\n", + "\n", + "Since your custom test IDs are now part of your documentation template, you can now run tests for an entire section and all additional custom tests should be loaded without any issues.\n", + "\n", + "Let's run 
all tests in the Model Evaluation section of the documentation. Note that we have been running the sample custom confusion matrix with `normalize=True` to demonstrate the ability to provide custom parameters.\n", + "\n", + "In the **Run the model evaluation tests** section of **[2 — Start the model development process](2-start_development_process.ipynb)**, you learned how to assign inputs to individual tests with [`run_documentation_tests()`](https://docs.validmind.ai/validmind/validmind.html#run_documentation_tests). Assigning parameters is similar, you only need to provide assign a `params` dictionary to a given test ID, `my_test_provider.ConfusionMatrix` in this case.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_config = {\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\n", + " \"dataset\": vm_train_ds,\n", + " \"model\": vm_model,\n", + " },\n", + " },\n", + " \"my_test_provider.ConfusionMatrix\": {\n", + " \"params\": {\"normalize\": True},\n", + " \"inputs\": {\"dataset\": vm_test_ds, \"model\": vm_model},\n", + " },\n", + "}\n", + "results = vm.run_documentation_tests(\n", + " section=[\"model_evaluation\"],\n", + " inputs={\n", + " \"dataset\": vm_test_ds, # Any test that requires a single dataset will use vm_test_ds\n", + " \"model\": vm_model,\n", + " \"datasets\": (\n", + " vm_train_ds,\n", + " vm_test_ds,\n", + " ), # Any test that requires multiple datasets will use vm_train_ds and vm_test_ds\n", + " },\n", + " config=test_config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Documentation template configuration\n", + "\n", + "Let's call the utility function [`vm.get_test_suite().get_default_config()`](https://docs.validmind.ai/validmind/validmind/vm_models.html#TestSuite.get_default_config) which will return the **default configuration for the entire 
documentation template as a dictionary:**\n", + "\n", + "- This configuration will contain all the test IDs and their default parameters.\n", + "- You can then modify this configuration as needed and pass it to `run_documentation_tests()` to run all tests in the documentation template if needed.\n", + "- You still have the option to continue running tests for one section at a time; `get_default_config()` simply provides a useful reference for providing default parameters to every test." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "model_test_suite = vm.get_test_suite()\n", + "config = model_test_suite.get_default_config()\n", + "print(\"Suite Config: \\n\", json.dumps(config, indent=2))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "\n", + "### Update the config\n", + "\n", + "The default config does not assign any inputs to a test, but you can assign inputs to individual tests as needed depending on the datasets and models you want to pass to individual tests.\n", + "\n", + "For this particular documentation template (binary classification), the ValidMind Library provides a sample configuration that can be used to populate the entire model documentation using the following inputs as placeholders:\n", + "\n", + "- A **`raw_dataset`** raw dataset\n", + "- A **`train_dataset`** training dataset\n", + "- A **`test_dataset`** test dataset\n", + "- A trained **`model`** instance\n", + "\n", + "As part of updating the `config` you will need to ensure the correct `input_id`s are used in the final config passed to `run_documentation_tests()`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "metadata": {} + }, + "outputs": [], + "source": [ + "from validmind.datasets.classification import customer_churn\n", + "from validmind.utils import preview_test_config\n", + "\n", + "test_config = 
customer_churn.get_demo_test_config()\n", + "preview_test_config(test_config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using this sample configuration, let's finish populating model documentation by running all tests for the Model Development section of the documentation.\n", + "\n", + "Recall that the training and test datasets in our exercise have the following `input_id` values:\n", + "\n", + "- **`train_dataset_final`** for the training dataset\n", + "- **`test_dataset_final`** for the test dataset\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"validmind.model_validation.ModelMetadata\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"validmind.data_validation.DatasetSplit\": {\n", + " \"inputs\": {\"datasets\": [\"train_dataset_final\", \"test_dataset_final\"]},\n", + " },\n", + " \"validmind.model_validation.sklearn.PopulationStabilityIndex\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\"num_bins\": 10, \"mode\": \"fixed\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ConfusionMatrix\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"my_test_provider.ConfusionMatrix\": {\n", + " \"inputs\": {\"dataset\": \"test_dataset_final\", \"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"my_custom_tests.ConfusionMatrix:test_dataset_normalized\": {\n", + " \"inputs\": {\"dataset\": \"test_dataset_final\", \"model\": \"log_reg_model_v1\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ClassifierPerformance:in_sample\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"train_dataset_final\"}\n", + " },\n", + " 
\"validmind.model_validation.sklearn.ClassifierPerformance:out_of_sample\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"}\n", + " },\n", + " \"validmind.model_validation.sklearn.PrecisionRecallCurve\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.ROCCurve\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.TrainingTestDegradation\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"metrics\": [\"accuracy\", \"precision\", \"recall\", \"f1\"],\n", + " \"max_threshold\": 0.1,\n", + " },\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumAccuracy\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.7},\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumF1Score\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.5},\n", + " },\n", + " \"validmind.model_validation.sklearn.MinimumROCAUCScore\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"min_threshold\": 0.5},\n", + " },\n", + " \"validmind.model_validation.sklearn.PermutationFeatureImportance\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " },\n", + " \"validmind.model_validation.sklearn.SHAPGlobalImportance\": {\n", + " \"inputs\": {\"model\": \"log_reg_model_v1\", \"dataset\": \"test_dataset_final\"},\n", + " \"params\": {\"kernel_explainer_samples\": 10},\n", + " },\n", + " 
\"validmind.model_validation.sklearn.WeakspotsDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"thresholds\": {\"accuracy\": 0.75, \"precision\": 0.5, \"recall\": 0.5, \"f1\": 0.7}\n", + " },\n", + " },\n", + " \"validmind.model_validation.sklearn.OverfitDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\"cut_off_percentage\": 4},\n", + " },\n", + " \"validmind.model_validation.sklearn.RobustnessDiagnosis\": {\n", + " \"inputs\": {\n", + " \"model\": \"log_reg_model_v1\",\n", + " \"datasets\": [\"train_dataset_final\", \"test_dataset_final\"],\n", + " },\n", + " \"params\": {\n", + " \"scaling_factor_std_dev_list\": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],\n", + " \"accuracy_decay_threshold\": 4,\n", + " },\n", + " },\n", + "}\n", + "\n", + "\n", + "full_suite = vm.run_documentation_tests(\n", + " section=\"model_development\",\n", + " config=config,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## In summary\n", + "\n", + "In this final notebook, you learned how to:\n", + "\n", + "- [x] Refresh the connection from the ValidMind Library to the ValidMind Platform after you've inserted test-driven blocks to your documentation\n", + "- [x] Include custom test results in your model documentation\n", + "- [x] View and configure the configuration for your model documentation template\n", + "\n", + "With our ValidMind for model development series of notebooks, you learned how to document a model end-to-end with the ValidMind Library by running through some common scenarios in a typical model development setting:\n", + "\n", + "- Running out-of-the-box tests\n", + "- Documenting your model by adding evidence to model documentation\n", + "- Extending the 
capabilities of the ValidMind Library by implementing custom tests\n", + "- Ensuring that the documentation is complete by running all tests in the documentation template" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Next steps" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Work with your model documentation\n", + "\n", + "Now that you've logged all your test results and generated a draft for your model documentation, head to the ValidMind Platform to make qualitative edits, view guidelines, collaborate with validators, and submit your model documentation for approval when it's ready. **Learn more:** [Working with model documentation](https://docs.validmind.ai/guide/model-documentation/working-with-model-documentation.html)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "### Learn more\n", + "\n", + "Now that you're familiar with the basics, you can explore the following notebooks to get a deeper understanding of how the ValidMind Library allows you to generate model documentation for any use case:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Use cases\n", + "\n", + "- [Application scorecard demo](../../code_samples/credit_risk/application_scorecard_demo.ipynb)\n", + "- [Linear regression documentation demo](../../code_samples/regression/quickstart_regression_full_suite.ipynb)\n", + "- [LLM model documentation demo](../../code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### More how-to guides and code samples\n", + "\n", + "- [Explore available tests in detail](../../how_to/explore_tests.ipynb)\n", + "- [In-depth guide on running dataset based tests](../../how_to/run_tests/1_run_dataset_based_tests.ipynb)\n", + "- [In-depth guide for implementing custom 
tests](../../code_samples/custom_tests/implement_custom_tests.ipynb)\n", + "- [In-depth guide to external test providers](../../code_samples/custom_tests/integrate_external_test_providers.ipynb)\n", + "- [Configuring dataset features](../../how_to/configure_dataset_features.ipynb)\n", + "- [Introduction to unit and composite metrics](../../how_to/run_unit_metrics.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "#### Discover more learning resources\n", + "\n", + "All notebook samples can be found in the following directories of the ValidMind Library GitHub repository:\n", + "\n", + "- [Code samples](https://github.com/validmind/validmind-library/tree/main/notebooks/code_samples)\n", + "- [How-to guides](https://github.com/validmind/validmind-library/tree/main/notebooks/how_to)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ValidMind Library", + "language": "python", + "name": "validmind" + }, + "language_info": { + "name": "python", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/site/notebooks/EXECUTED/model_development/add-content-block.gif b/site/notebooks/EXECUTED/model_development/add-content-block.gif new file mode 100644 index 0000000000..daab9d44d8 Binary files /dev/null and b/site/notebooks/EXECUTED/model_development/add-content-block.gif differ diff --git a/site/notebooks/EXECUTED/model_development/high-pearson-correlation-block.png b/site/notebooks/EXECUTED/model_development/high-pearson-correlation-block.png new file mode 100644 index 0000000000..dbe44392d1 Binary files /dev/null and b/site/notebooks/EXECUTED/model_development/high-pearson-correlation-block.png differ diff --git a/site/notebooks/EXECUTED/model_development/selecting-confusion-matrix-test.png b/site/notebooks/EXECUTED/model_development/selecting-confusion-matrix-test.png new file mode 100644 index 0000000000..de27323a76 Binary files /dev/null and 
b/site/notebooks/EXECUTED/model_development/selecting-confusion-matrix-test.png differ diff --git a/site/notebooks/EXECUTED/model_development/selecting-high-pearson-correlation-test.png b/site/notebooks/EXECUTED/model_development/selecting-high-pearson-correlation-test.png new file mode 100644 index 0000000000..2c4b87c6bb Binary files /dev/null and b/site/notebooks/EXECUTED/model_development/selecting-high-pearson-correlation-test.png differ diff --git a/site/notebooks/tutorials/model_development/2-start_development_process.ipynb b/site/notebooks/tutorials/model_development/2-start_development_process.ipynb index 74bec6960a..e886dc7ff4 100644 --- a/site/notebooks/tutorials/model_development/2-start_development_process.ipynb +++ b/site/notebooks/tutorials/model_development/2-start_development_process.ipynb @@ -74,8 +74,6 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n", - "\n", "## Setting up" ] }, diff --git a/site/python-docs.zip b/site/python-docs.zip index abbee90668..f79a0741a8 100644 Binary files a/site/python-docs.zip and b/site/python-docs.zip differ diff --git a/site/releases/2025/2025-apr-24/release-notes.qmd b/site/releases/2025/2025-apr-24/release-notes.qmd index b08720adcc..d9b455b078 100644 --- a/site/releases/2025/2025-apr-24/release-notes.qmd +++ b/site/releases/2025/2025-apr-24/release-notes.qmd @@ -104,7 +104,7 @@ Labels: documentation, highlight :::: {.flex .flex-wrap .justify-around} -::: {.w-60-ns} +::: {.w-60-ns .pr2} We've completely redesigned our {{< var validmind.api >}} reference using the same tools we use to produce the rest of our product documentation, allowing us to more easily keep this information up to date and ensure its accuracy. 
::: diff --git a/site/training/_compatibility.qmd b/site/training/_compatibility.qmd new file mode 100644 index 0000000000..2e3b1a07f9 --- /dev/null +++ b/site/training/_compatibility.qmd @@ -0,0 +1,4 @@ +[browser compatibility]{.smallcaps .pink} + +{{< var validmind.training >}} courses work best with Chromium-based web browsers, such as Google Chrome, Microsoft Edge, and Opera. + diff --git a/site/training/administrator-fundamentals/administrator-fundamentals-register.qmd b/site/training/administrator-fundamentals/administrator-fundamentals-register.qmd index 208467fcea..99f572a1d5 100644 --- a/site/training/administrator-fundamentals/administrator-fundamentals-register.qmd +++ b/site/training/administrator-fundamentals/administrator-fundamentals-register.qmd @@ -6,7 +6,12 @@ aliases: - training-for-administrators.html --- -As an administrator who is new to {{< var vm.product >}}, learn how to onboard your organization, manage users and roles, and manage permissions for specific roles. +Learn how to use {{< var vm.product >}} as an **administrator** to onboard your organization, manage users, roles, and permissions, and customize the {{< var vm.platform >}} for your institution's requirements. 
+ +::: {.column-margin} +{{< include /training/_compatibility.qmd >}} + +::: ::: {.attn} diff --git a/site/training/assets/slides.scss b/site/training/assets/slides.scss index 0ba87a296c..2af460f6c5 100644 --- a/site/training/assets/slides.scss +++ b/site/training/assets/slides.scss @@ -164,3 +164,9 @@ section.has-dark-background a:hover { color: #b67a16ff; } +.reveal .slide input[type="checkbox"] { + accent-color: #de257e; /* Changes the checkmark color */ + border: 2px solid #de257e; + border-radius: 4px; /* Optional: makes it slightly rounded */ +} + diff --git a/site/training/developer-fundamentals/developer-fundamentals-register.qmd b/site/training/developer-fundamentals/developer-fundamentals-register.qmd index f15d5be2c4..a7913460b0 100644 --- a/site/training/developer-fundamentals/developer-fundamentals-register.qmd +++ b/site/training/developer-fundamentals/developer-fundamentals-register.qmd @@ -4,9 +4,48 @@ subtitle: For {{< var vm.product >}} date: last-modified aliases: - training-for-model-developers.html +listing: + - id: developer-modules + type: grid + grid-columns: 2 + max-description-length: 500 + sort: false + table-hover: true + image-align: left + contents: + - path: using-validmind-for-model-development.html + title: "Using {{< var vm.product >}} for Model Development" + subtitle: "Module 1" + description: "{{< fa check >}} Register a model in the {{< var validmind.platform >}}
{{< fa check >}} Install the {{< var validmind.developer >}} in your environment
{{< fa check >}} Connect to your registered model
{{< fa check >}} Preview your model's documentation template" + reading-time: "15" + author: "{{< var vm.product >}}" + - path: learning-to-run-tests.html + title: "Learning to Run Tests" + subtitle: "Module 2" + description: "{{< fa check >}} Identify relevant tests to run from {{< var vm.product >}}'s test vault
{{< fa check >}} Initialize datasets & models for use with the {{< var validmind.developer >}}
{{< fa check >}} Run and log out-of-the-box tests for new and existing models
{{< fa check >}} Insert test results into your model's documentation" + reading-time: "45" + author: "{{< var vm.product >}}" + - path: implementing-custom-tests.html + title: "Implementing Custom Tests" + subtitle: "Module 3" + description: "{{< fa check >}} Implement, run, and log custom inline and reusable tests
{{< fa check >}} Refresh your template to include inserted test-driven blocks
{{< fa check >}} Configure your template to include custom tests" + reading-time: "45" + author: "{{< var vm.product >}}" + - path: finalizing-model-documentation.html + title: "Finalizing Model Documentation" + subtitle: "Module 4" + description: "{{< fa check >}} Refine your model documentation
{{< fa check >}} Submit your model documentation for approval
{{< fa check >}} Track changes and other updates to your model" + reading-time: "20" + author: "{{< var vm.product >}}" + fields: [title, subtitle, description, reading-time] --- -As a developer who is new to {{< var vm.product >}}, learn how to generate model documentation, add your own tests, edit the content, and then submit your documentation for approval. +Learn how to use {{< var vm.product >}} as a **developer** to generate model documentation, automate testing, and track your model's progress through the model lifecycle. + +::: {.column-margin} +{{< include /training/_compatibility.qmd >}} + +::: ::: {.attn} @@ -15,45 +54,24 @@ As a developer who is new to {{< var vm.product >}}, learn how to generate model :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns .pr3} -### Learn how to - -#### Part 1 - -- Initialize the {{< var validmind.developer >}} -- Start the model development process -- Edit model documentation -- Collaborate with others - -#### Part 2 - -- Train a model -- Implement custom tests and integrate external test providers -- Finalize testing and documentation +#### Total duration -#### Part 3 +120 minutes -- View documentation activity -- Submit for approval - -::: - -::: {.w-50-ns .pa3} - -::: {.preview source="developer-fundamentals.qmd"} ::: -#### Duration - -100 minutes +::: {.w-50-ns .pa3 .tr} -[register](#register){.button-green .cta} +[register now](#register){.button-green .cta} ::: :::: +:::{#developer-modules} ::: +::: {{< include /training/_training-register.qmd >}} diff --git a/site/training/developer-fundamentals/developer-fundamentals.qmd b/site/training/developer-fundamentals/developer-fundamentals.qmd deleted file mode 100644 index 683ac5ae50..0000000000 --- a/site/training/developer-fundamentals/developer-fundamentals.qmd +++ /dev/null @@ -1,462 +0,0 @@ ---- -title: "Developer
Fundamentals" -subtitle: "docs.validmind.ai/training

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" -lightbox: true -format: - revealjs: - controls: true - controls-tutorial: true - help: true - controls-back-arrows: visible - transition: slide - theme: [default, ../assets/slides.scss] - slide-number: true - chalkboard: false - preview-links: auto - view-distance: 2 - logo: /validmind.png - footer: "{{< var validmind.training >}} | [Home {{< fa person-walking-dashed-line-arrow-right >}}](/training/training.qmd)" - html: - output-file: _developer-fundamentals.html - search: false -title-slide-attributes: - data-background-color: "#083E44" - data-background-image: "../assets/home-hero.svg" -skip_preview: true ---- - -# Learning objectives - -_"As a developer who is new to {{< var vm.product >}}, I want to learn how to generate model documentation, add my own tests, edit the content, and then submit my documentation for approval."_ - -## In this course - -:::: {.columns .f3} -::: {.column width="35%" .mt4 .pr4} -### PART 1 - -- [Initialize the {{< var validmind.developer >}}](#initialize-the) -- [Start the model development process](#start-the-model-development-process) -- [Edit model documentation](#edit-model-documentation) -- [Collaborate with others](#collaborate-with-others) -::: - -::: {.column width="35%" .mt4 .pr4} -### PART 2 - -- [Train a model](#train-a-model) -- [Implement custom tests and integrate external test providers](#implement-custom-tests-and-integrate-external-test-providers) -- [Finalize testing and documentation](#finalize-testing-and-documentation) - -::: - -::: {.column width="30%" .mt4} -### PART 3 - -- [View documentation activity](#view-documentation-activity) -- [Submit for approval](#submit-for-approval) -::: -:::: - -First, let's make sure you can log in to {{< var vm.product >}}. - -{{< include /training/assets/_revealjs-navigation.qmd >}} - -## Can you log in? 
- -To try out this course, you need to have been [onboarded](developer-fundamentals-register.qmd#register) onto {{< var validmind.training >}} with the [**{{< fa code >}} Developer**]{.bubble} role. - -
Log in to check your access: - -:::: {.columns} -::: {.column width="50%"} -::: {.tc} -[Log in to JupyterHub]({{< var url.jupyterhub >}}/){.button target="_blank"} -::: - -::: -::: {.column width="50%"} -::: {.tc} -[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"} -::: - -::: -:::: -
- -::: {.tc} -Be sure to return to this page afterwards. -::: - -# You're in — let's show you around. - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html" background-interactive="yes" data-preload="yes"} - - -:::: {.absolute bottom=15 left=0 right=50 .w-100 .f3 .tc .pl4 .pr4 .overlay} -**This introductory notebook includes sample code and how-to information, all in one place.** - -::: {.f4} -When run on JupyterHub, this notebook will generate model documentation and upload it to {{< var vm.product >}}. After you finish this course, your own training notebook will look similar, complete with output. - -For now, **scroll through this notebook** to explore. When you are done, click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -## {background-iframe="https://app.prod.validmind.ai" background-interactive="true" data-preload="yes"} - -::: {.fr .f3 .nr5 .pa5 .overlay} -**This is the {{< var validmind.platform >}}.** - -From here, you can: - -- Register models in the model inventory. -- Review and edit model documentation
generated with the introductory notebook. -- Collaborate with model validators to get
your documentation approved. -- And much more! - -::: {.f4 .pl3 .pr3 .embed} -**To start the documentation process**, you register a
new model in the model inventory or select one that
has already been registered. -::: - -**Explore {{< var vm.product >}} live** on the next page. {{< fa hand-point-right >}} -::: - -## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} - -:::: {.fr .f4 .mv5 .nr5 .pa4 .overlay} - -From the **{{< fa cubes >}} Inventory**: - -1. Open a model, such as: - -::: {.f5 .nt2} - **[Quickstart] Customer Churn Model** -::: - -2. Explore **{{< fa book-open >}} Documentation** for
model documentation. -3. Check **{{< fa rocket >}} Getting Started** for
the code snippet. - -::: {.pl3 .pr3 .embed} -**Did you find the _code snippet_?**
You will copy and paste a similar
snippet into your own notebook
later to upload documentation. -::: - -When you're done, click [{{< fa chevron-right >}}]() to
continue. -:::: - -# PART 1 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} - -# Initialize the {{< var vm.developer >}} {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html#initializing-the-validmind-library" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -On JupyterHub: Run the cells in **1. Initializing the {{< var validmind.developer >}}**. - -::: {.f5 .nt2} -When you are done, return to this page and click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Start the model development process {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html#start-the-model-development-process-with-raw-data-run-out-of-the-box-tests-and-add-evidence-to-model-documentation" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -On JupyterHub: Run the cells in **2. Start the model development process with raw data, run out-of-the box tests, and add evidence to model documentation**. - -::: {.f4} -When you reach **Add individual test results to model documentation**, return to this page and click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Edit model documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - -## {.scrollable} - -:::: {.columns} -::: {.column width="30%" .pr4 .f2} -Now that you have generated documentation, edit it on {{< var vm.product >}} to add text or test-driven content blocks. - -::: {.f5 .nt2 .pl2 .mb4} -(Scroll down for the full instructions.) -::: - -::: {.tc} -[Learn more ...](/guide/model-documentation/work-with-content-blocks.qmd){.button target="_blank"} -::: - -
Try it **live** on the next page. {{< fa hand-point-right >}} -::: - -::: {.column width="70%" .bl .pl4 .f3} -### Add a test-driven content block - -Content blocks provide sections that are part of your model documentation — you can always add more, as required, and fill them with text or test results. - -1. Select a model or [find your model by applying a filter or searching for it](/guide/model-inventory/working-with-model-inventory.qmd#search-filter-and-sort-models){target="_blank"}. - -2. In the left sidebar that appears for your model, click **{{< fa book-open >}} Documentation**. - -3. Navigate to the **2.3. Correlations and Interactions** section. - -4. Hover the cursor after the Pearson Correlation Matrix content block until a horizontal dashed line with a {{< fa square-plus >}} button appears that indicates you can insert a new block: - - ![Screenshot showing the insert button for test-driven blocks](/guide/model-documentation/add-content-block.gif){fig-alt="Screenshot showing the insert button for test-driven blocks" .screenshot} - -4. Click {{< fa square-plus >}} and then select **Test-Driven Block**: - - - In the search bar, type in `HighPearsonCorrelation`. - - Select **`HighPearsonCorrelation:balanced_raw_dataset`** as the test. - - A preview of the test gets shown: - - ![Screenshot showing the selected test result in the dialog](selecting-high-pearson-correlation-test.png){fig-alt="Screenshot showing the selected test result in the dialog" .screenshot} - -5. Click **Insert 1 Test Result to Document**. - -After you have completed these steps, the new content block becomes a part of your model documentation. You will now see two individual results for the high-correlation test in the **2.3. Correlations and Interactions** section of the documentation. - -To finalize the documentation, you can also edit the description of the test result to explain the changes made to the raw data and the reasons behind them. 
For example: - -![Screenshot showing description added to the new content block](/notebooks/images/high-pearson-correlation-block.png){fig-alt="Screenshot showing description added to the new content block" .screenshot} - -::: -:::: - -## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -Locate the model documentation you generated and add a test-driven block to the **2.3 Correlations and Interactions** section. - -::: {.f5 .nt2} -When you are done, click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Collaborate with others {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - -## {.scrollable} - -:::: {.columns} -::: {.column width="30%" .pr4 .f2} -Have a question? Collaborate with other developers or with your validator right in the model documentation. - -::: {.tc} -[Learn more ...](/guide/model-documentation/collaborate-with-others.qmd){.button target="_blank"} -::: - -
Try it **live** on the next page. {{< fa hand-point-right >}} -::: - -::: {.column width="70%" .bl .pl4 .f3} -::: {.f5 .nt2} -::: - -{{< include /guide/model-documentation/_collaborate-with-others-activity.qmd >}} - -::: {.panel-tabset} - -{{< include /guide/model-documentation/_collaborate-with-others-comments.qmd >}} - -::: - -::: - -:::: - - - -## {background-iframe="https://app.prod.validmind.ai" background-interactive="yes" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .pr4 .overlay} -In the documentation, post a comment, reply to it, and then resolve the thread.
Review your comments in the **Recent Activity** feed on the front page. - -::: {.f5 .nt2} -When you are done, click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# PART 2 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} - -# Train a model {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html#model-testing" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -On JupyterHub: Run the cells under **Model Testing**. - -::: {.f5 .nt2} -When you are done, return to this page and click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Implement custom tests and integrate external test providers {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html#implementing-custom-tests" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -On JupyterHub: Run the cells in **3. Implementing custom tests**. - -::: {.f5 .nt2} -When you are done, return to this page and click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Finalize testing and documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - - - -## {background-iframe="/notebooks/tutorials/intro_for_model_developers_EXECUTED.html#finalize-testing-and-documentation" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} - -On JupyterHub: Run the cells in **4. Finalize testing and documentation**. - -::: {.f5 .nt2} -When you are done, return to this page and click [{{< fa chevron-right >}}]() to continue. 
-::: -:::: - -# PART 3 {background-color="#083E44" background-image="/assets/img/solutions-hero.png"} - -# View documentation activity {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - -## {.scrollable} - -:::: {.columns} -::: {.column width="30%" .pr4 .f2} -Track changes and updates made to model documentation over time. - -::: {.f5 .nt2 .pl2 .mb4} -(Scroll down for the full instructions.) -::: - -::: {.tc} -[Learn more ...](/guide/model-inventory/view-model-activity.qmd){.button target="_blank"} -::: - -
Try it **live** on the next page. {{< fa hand-point-right >}} -::: - -::: {.column width="70%" .bl .pl4 .f3} -### View documentation activity - -{{< include /guide/model-inventory/_view-model-activity-overview.qmd >}} - -{{< include /guide/model-inventory/_view-model-activity-steps.qmd >}} - -::: - -:::: - -## {background-iframe="https://app.prod.validmind.ai/" background-interactive="true" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -Review the **Recent Activity** feed on the front page: - -:::: {.columns} -::: {.column width="25%" .nt2} -- Comments -::: -::: {.column width="25%" .nt2} -- Status updates -::: -::: {.column width="25%" .nt2} -- Model updates -::: -::: {.column width="25%" .nt2} -- Test results -::: -:::: - -::: {.f5 .nt3} -When you are done, click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# Submit for approval {background-color="#083E44" background-image="/training/assets/home-hero.svg"} - -## {.scrollable} - -:::: {.columns} -::: {.column width="30%" .pr4 .f2} -When you’re ready, verify the approval workflow, and then submit your model documentation for approval. - -::: {.f5 .nt2 .pl2 .mb4} -(Scroll down for the full instructions.) -::: - -::: {.tc} -[Learn more ...](/guide/model-documentation/submit-for-approval.qmd){.button target="_blank"} -::: - -
Try it **live** on the next page. {{< fa hand-point-right >}} -::: - -::: {.column width="70%" .bl .pl4 .f3} - -::: {.panel-tabset} -### Verify workflow - -Workflow states and transitions are configured by an administrator in advance, but you should verify that the expected people are included in the approval process. - -{{< include /guide/model-workflows/_model-workflows-see.qmd >}} - -### Submit for approval - -To transition through the approval workflow, all required workflow steps must be completed. By default, a model must be in the
[In Documentation]{.bubble} state before you can submit it for validation. - -{{< include /guide/model-workflows/_model-workflows-transition.qmd >}} - -::: - -::: -:::: - -## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} - -:::: {.absolute bottom=15 .w-100 .f3 .tc .pl4 .overlay} -Verify the approval workflow and then submit your model documentation for approval. - -::: {.f5 .nt2} -When you are done, click [{{< fa chevron-right >}}]() to continue. -::: -:::: - -# About model documentation - -There is more that {{< var vm.product >}} can do to help you create model documentation, from using your own template to code samples you can adapt for your own use case. - -::: {.tc} -[All model documentation guides](/guide/guides.qmd#model-documentation){.button target="_blank"} -::: -
- -Or, find your next learning resource on [{{< var validmind.training >}}](/training/training.qmd). - - \ No newline at end of file diff --git a/site/training/developer-fundamentals/finalizing-model-documentation.qmd b/site/training/developer-fundamentals/finalizing-model-documentation.qmd new file mode 100644 index 0000000000..39447d6cc7 --- /dev/null +++ b/site/training/developer-fundamentals/finalizing-model-documentation.qmd @@ -0,0 +1,422 @@ +--- +title: "Finalizing
Model Documentation" +subtitle: "Developer Fundamentals — Module 4 of 4

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" +lightbox: true +format: + revealjs: + controls: true + controls-tutorial: true + help: true + controls-back-arrows: visible + transition: slide + theme: [default, ../assets/slides.scss] + slide-number: true + chalkboard: false + preview-links: auto + view-distance: 2 + logo: /validmind.png + footer: "{{< var validmind.training >}} | [Home {{< fa person-walking-dashed-line-arrow-right >}}](/training/training.qmd)" + html: + # Change this to the file name prepended by a _ to get around the global HTML output settings required by _metadata.yml + output-file: _finalizing-model-documentation.html + search: false +title-slide-attributes: + data-background-color: "#083E44" + data-background-image: "../assets/home-hero.svg" +skip_preview: true +--- + +# Learning objectives {.center} + +_"As a **developer** who has logged tests with the {{< var validmind.developer >}} to the {{< var validmind.platform >}}, I want to refine my model's documentation, submit my model documentation for approval, and track changes and other updates to my model."_ + +::: {.tc} +
+This final module is part of a four-part series: +

+[Developer Fundamentals](/training/developer-fundamentals/developer-fundamentals-register.qmd){.button target="_blank"} +::: + + +## Module 4 — Contents {.center} + +::: {.f2} +1. [Refine model documentation](#refine-model-documentation) +3. [Submit documentation for approval](#submit-documentation-for-approval) +4. [Collaborate with others](#collaborate-with-others) +5. [Track model activity](#track-model-activity) + +::: + +First, let's make sure you can log in to {{< var vm.product >}}. + +{{< include /training/assets/_revealjs-navigation.qmd >}} + +## Before you begin {.center} + +::: {.panel-tabset} + +### Prerequisite courses + +To continue, you need to have been [onboarded](developer-fundamentals-register.qmd#register){target="_blank"} onto {{< var validmind.training >}} with the [**{{< fa code >}} Developer**]{.bubble} role and completed the first three modules of this course: + + + +:::: {.columns} +::: {.column width="30%"} +::: {.tc} +[Module 1](using-validmind-for-model-development.html){.button target="_blank"} +::: + +::: +::: {.column width="30%"} +::: {.tc} +[Module 2](learning-to-run-tests.html){.button target="_blank"} +::: + +::: + +::: {.column width="30%"} +::: {.tc} +[Module 3](implementing-custom-tests.html){.button target="_blank"} +::: + +::: +:::: + +:::: {.tc .mt5 .f2 .embed} +Already logged in and refreshed this module? Click [{{< fa chevron-right >}}]() to continue. + +::: + +### Log in + +1. Log in to check your access: + +::: {.tc} +[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"} +::: + +::: {.tc .f3} +Be sure to return to this page afterwards. +::: + +2. 
After you successfully log in, refresh the page to connect this training module up to the {{< var validmind.platform >}}: + +::: {.tc} + + +::: + + +::: + +# Refine model documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Review test results + +::: {.f5 .nt2 .pl2 .mb4} +(Scroll down for the full instructions.) +::: + +::: {.tc} + +[Learn more ...](/guide/model-documentation/work-with-test-results.qmd){.button target="_blank"} + +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} + +::: + +::: {.column width="70%" .bl .pl4 .f4} + +{{< include /guide/model-documentation/_test-result-metadata.qmd >}} + +### View test result metadata + +From the {{< var validmind.platform >}}: + +1. In the left sidebar, click **{{< fa cubes >}} Inventory**. + +2. Select the name of your model you registered for this course to open up the model details page. + +3. In the left sidebar that appears for your model, click **{{< fa book-open >}} Documentation**. + +{{< include /guide/model-documentation/_view-test-result-metadata.qmd >}} + + + +::: +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Review model documentation** + +::: {.f5} +1. From the **{{< fa cubes >}} Inventory**, select the name of your model you registered for this course to open up the model details page. +2. On the left sidebar that appears for your model, click **Documentation**. +3. Click into any section of the documentation to review the test results logged via the {{< var validmind.developer >}}.
+ For example: **2.3 Correlations and Interactions** / **3.2 Model Evaluation** + +::: + +When you're done taking a look around, click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {.scrollable} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Make qualitative edits + +::: {.f5 .nt2 .pl2 .mb4} +(Scroll down for the full instructions.) +::: + +::: {.tc} +[Learn more ...](/guide/model-documentation/work-with-content-blocks.qmd){.button target="_blank"} + +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f4} + +::: {.panel-tabset} + +### Add content blocks + +In any section of your model documentation, hover your mouse over the space where you want your new block to go until a horizontal dashed line with a {{< fa square-plus >}} sign appears that indicates you can insert a new block: + + ![Adding a content block in the UI](/guide/model-documentation/add-content-block.gif){width=90% fig-alt="A gif showing the process of adding a content block in the UI" .screenshot} + +After adding the block to your documentation, generate a content draft with AI using the [content editing toolbar](/guide/model-documentation/work-with-content-blocks.qmd#content-editing-toolbar){target="_blank"}: + +{{< include /guide/model-documentation/content_blocks/_generate-with-ai.qmd >}} + +### Edit test result descriptions +You can also use the content editing toolbar to revise the description of test results to explain the changes made to the raw data and the reasons behind them. + +For example: + +1. Locate the Data Preparation section and click on **2.3 Correlations and Interactions** to expand that section. + +2. Edit the description for our individually inserted `HighPearsonCorrelation:balanced_raw_dataset` test: + + ![Screenshot showing description added to the new content block](/notebooks/images/high-pearson-correlation-block.png){fig-alt="Screenshot showing description added to the new content block" .screenshot} + +::: + +::: +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Add & edit content blocks** + +::: {.f5} +1. From the **{{< fa cubes >}} Inventory**, select the name of your model you registered for this course to open up the model details page. +2. 
On the left sidebar that appears for your model, click **Documentation**. +3. Click into any section of the documentation to add or edit a content block. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: + +# Submit documentation for approval {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Transition model status + +::: {.f5 .nt2 .pl2 .mb4} +(Scroll down for the full instructions.) +::: + +::: {.tc} +[Learn more ...](/guide/model-documentation/submit-for-approval.qmd){.button target="_blank"} +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f4} + +When you’re ready, verify the approval workflow, and then submit your model documentation for approval: + +::: {.panel-tabset} +### Verify workflow + +Workflow states and transitions are configured by an administrator in advance, but you should verify that the expected people are included in the approval process. + +{{< include /guide/model-workflows/_model-workflows-see.qmd >}} + +### Submit for approval + +To transition through the approval workflow, all required workflow steps must be completed. By default, a model must be in the
[In Documentation]{.bubble} state before you can submit it for validation. + +{{< include /guide/model-workflows/_model-workflows-transition.qmd >}} + +::: + +::: +:::: + + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Submit for approval** + +::: {.f5} +1. From the **{{< fa cubes >}} Inventory**, select the name of your model you registered for this course to open up the model details page. +2. Locate the **[model status]{.smallcaps}** section. +3. Open up the status transition panel, enter your **[notes]{.smallcaps}** and any other additional inventory fields, then click **Submit**. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: + +# Collaborate with others {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Comment threads + +::: {.tc} +[Learn more ...](/guide/model-documentation/collaborate-with-others.qmd){.button target="_blank"} +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f3} +::: {.f5 .nt2} +::: + +{{< include /guide/model-documentation/_collaborate-with-others-activity.qmd >}} + +::: {.panel-tabset} + +{{< include /guide/model-documentation/_collaborate-with-others-comments.qmd >}} + +::: + +::: + +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Comment on model documentation** + +::: {.f5} +1. From the **{{< fa cubes >}} Inventory**, select the name of your model you registered for this course to open up the model details page. +2. In the left sidebar that appears for your model, click **Documentation**. +3. **In the content block you added earlier:** Post a comment, reply to it, and then resolve the thread. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: + + +# Track model activity {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Track changes & updates + +::: {.tc} +[Learn more ...](/guide/model-inventory/view-model-activity.qmd){.button target="_blank"} +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f4} +### View documentation activity + +{{< include /guide/model-inventory/_view-model-activity-overview.qmd >}} + +{{< include /guide/model-inventory/_view-model-activity-steps.qmd >}} + +::: + +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Review model activity** + +::: {.f5} +1. From the **{{< fa cubes >}} Inventory**, select the name of your model you registered for this course to open up the model details page. +2. In the left sidebar that appears for your model, click **Model Activity**. +3. Filter the following activity: **Comments** | **Status Updates** | **Model Updates** | **Test Results** + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: + + +# In summary {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Finalizing model documentation + + +::: {.f3} +
Want to learn more? Find your next learning resource on [{{< fa graduation-cap >}} {{< var validmind.training >}}](/training/training.qmd){target="_blank"}. + +::: + +::: + +::: {.column width="70%" .bl .pl4 .f4} +In this final module, you learned how to: + +- [x] View logged test result metadata +- [x] Add or edit content blocks in your model documentation +- [x] Submit your model documentation for approval +- [x] Collaborate with other stakeholders on your model +- [x] Track updates on your model +::: +:::: + +::: {.f2} +
+There is more that {{< var vm.product >}} can do to help you create model documentation, from using your own template to code samples you can adapt for your own use case: +::: + +::: {.tc} +[All model documentation guides](/guide/guides.qmd#model-documentation){.button target="_blank"} + +::: \ No newline at end of file diff --git a/site/training/developer-fundamentals/implementing-custom-tests.qmd b/site/training/developer-fundamentals/implementing-custom-tests.qmd new file mode 100644 index 0000000000..e11fdda4be --- /dev/null +++ b/site/training/developer-fundamentals/implementing-custom-tests.qmd @@ -0,0 +1,667 @@ +--- +title: "Implementing
Custom Tests" +subtitle: "Developer Fundamentals — Module 3 of 4

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" +lightbox: true +format: + revealjs: + controls: true + controls-tutorial: true + help: true + controls-back-arrows: visible + transition: slide + theme: [default, ../assets/slides.scss] + slide-number: true + chalkboard: false + preview-links: auto + view-distance: 2 + logo: /validmind.png + footer: "{{< var validmind.training >}} | [Home {{< fa person-walking-dashed-line-arrow-right >}}](/training/training.qmd)" + html: + # Change this to the file name prepended by a _ to get around the global HTML output settings required by _metadata.yml + output-file: _implementing-custom-tests.html + search: false +title-slide-attributes: + data-background-color: "#083E44" + data-background-image: "../assets/home-hero.svg" +skip_preview: true +--- + +# Learning objectives {.center} + +_"As a **developer** who has run and logged out-of-the-box tests with {{< var vm.product >}}, I want to update my documentation template to include manually inserted test results, implement, run, and log custom tests, and include those custom tests in my documentation template."_ + +::: {.tc} +
+This third module is part of a four-part series: +

+[Developer Fundamentals](/training/developer-fundamentals/developer-fundamentals-register.qmd){.button target="_blank"} +::: + + +## Module 3 — Contents {.center} + +:::: {.columns .f3} +::: {.column width="50%" .mt4 .pr4} +### Introduction +- [{{< var vm.product >}} for model development](#validmind-for-model-development) +::: + +::: + +:::: {.columns .f3} + +::: {.column width="50%" .mt4 .pr4} +### Section 1 + +- [Implement custom inline tests](#implement-custom-tests) +- [Use external test providers](#use-external-test-providers) + +::: + +::: {.column width="50%" .mt4} +### Section 2 + +- [Include custom test results](#include-custom-test-results) +- [Configure the model's documentation template](#configure-documentation-templates) +::: + +:::: + + +
+First, let's make sure you can log in to {{< var vm.product >}}.
+
+{{< include /training/assets/_revealjs-navigation.qmd >}}
+
+## Before you begin {.center}
+
+::: {.panel-tabset}
+
+### Prerequisite courses
+
+To continue, you need to have been [onboarded](developer-fundamentals-register.qmd#register){target="_blank"} onto {{< var validmind.training >}} with the [**{{< fa code >}} Developer**]{.bubble} role and completed the first two modules of this course:
+
+
+
+:::: {.columns}
+::: {.column width="60%"}
+::: {.tc}
+[Using {{< var vm.product >}} for model development](using-validmind-for-model-development.html){.button target="_blank"}
+:::
+
+:::
+::: {.column width="40%"}
+::: {.tc}
+[Learning to run tests](learning-to-run-tests.html){.button target="_blank"}
+:::
+
+:::
+::::
+
+:::: {.tc .mt5 .f2 .embed}
+Already logged in and refreshed this module? Click [{{< fa chevron-right >}}]() to continue.
+
+:::
+
+### Log in
+
+1. Log in to check your access:
+
+:::: {.flex .flex-wrap .justify-around}
+
+::: {.w-50-ns .tc}
+
+[Log in to JupyterHub](https://jupyterhub.validmind.ai/){.button target="_blank"}
+
+:::
+
+::: {.w-50-ns .tc}
+[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"}
+:::
+
+::::
+
+
+::: {.tc .f3}
+Be sure to return to this page afterwards.
+:::
+
+2. After you successfully log in, refresh the page to connect this training module up to the {{< var validmind.platform >}}:
+
+::: {.tc}
+
+
+:::
+
+:::
+
+
+
+# ValidMind for model development {background-color="#083E44" background-image="/training/assets/home-hero.svg"}
+
+## {.scrollable .center}
+
+:::: {.columns}
+::: {.column width="30%" .pr4 .f2}
+Jupyter Notebook series
+
+::: {.f3}
+When you run these notebooks, they will generate a draft of model documentation and upload it to {{< var vm.product >}}, complete with supporting test results.
+
+::: {.f5 .nt2 .pl2 .mb4}
+
+
+You will need to have already completed notebooks **1** and **2** during the first and second modules to proceed.
+
+:::
+
+:::
+
+::: {.column width="70%" .bl .pl4 .f3}
+### {{< var vm.product >}} for model development
+
+Our series of four introductory notebooks for model developers includes sample code and how-to information to get you started with {{< var vm.product >}}:
+
+1 — [Set up the {{< var validmind.developer >}}](/notebooks/tutorials/model_development/1-set_up_validmind.ipynb){target="_blank"}
+2 — [Start the model development process](/notebooks/tutorials/model_development/2-start_development_process.ipynb){target="_blank"}
+3 — [Integrate custom tests](/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"}
+4 — [Finalize testing and documentation](/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"}
+
+ +::: {.f4 .pl3 .pr3 .embed} +In this third module, we'll run through the remaining two notebooks **3** in Section 1 and **4** in Section 2 together. +::: + +::: +:::: + +Let's continue our journey with **Section 1** on the next page. {{< fa hand-point-right >}} + +# Section 1 {background-color="#083E44" background-image="/assets/img/about-us-esphere.svg"} + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html" background-interactive="yes" data-preload="yes"} + +:::: {.absolute bottom=15 left=0 right=50 .w-100 .f4 .tc .pl4 .pr4 .overlay} +**3 — Integrate custom tests** + +::: {.f5} +This is the third notebook in our introductory series, which will walk you through how to implement different types of custom tests with {{< var vm.product >}}. + +::: + +**Scroll through this notebook** to explore. When you are done, click [{{< fa chevron-right >}}]() to continue. + +:::: + +## Get your code snippet + +:::: {.columns} + +::: {.column width="80%"} + + + +
+ +
+ +::: + +::: {.column width="20%" .f4} + +::: {.f5} +{{< var vm.product >}} generates a unique *code snippet* for each registered model to connect with your developer environment: + +1. From the **{{< fa cubes >}} Inventory**, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Getting Started**. +3. Locate the code snippet and click **Copy snippet to clipboard**. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +::: + +:::: + +:::: {.tc .f6 .embed} +**Can't load the {{< var validmind.platform >}}?** + +Make sure you're logged in and have refreshed the page in a Chromium-based web browser. + +::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#initialize-the-validmind-library" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Connect to your model** + +::: {.f5} +With your code snippet copied to your clipboard: + +1. Open **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. Run all the cells under the **Setting up** section. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +# Implement custom tests {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Custom inline tests + +
Try it **live** on the next pages. {{< fa hand-point-right >}} + +::: + +::: {.column width="70%" .bl .pl4 .f3} +Let's implement a custom *inline test* that calculates the confusion matrix for a binary classification model. + +- An inline test refers to a test written and executed within the same environment as the code being tested — in the following example, right in our Jupyter Notebook — without requiring a separate test file or framework. +- You'll note that the custom test function is just a regular Python function that can include and require any Python library as you see fit. + +::: +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#create-a-confusion-matrix-plot" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Create a custom test** + +::: {.f5} +The [`@vm.test` wrapper](/validmind/validmind.qmd#test){target="_blank"} allows you to create a reusable test: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. Run all the cells in the following section under Implementing a custom inline test: **Create a confusion matrix plot** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#add-parameters-to-custom-tests" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Adjust your custom test** + +::: {.f5} +Custom tests can take parameters just like any other function: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. 
Run all the cells in the following sections under Implementing a custom inline test:
**Add parameters to custom tests** / **Pass parameters to custom tests** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#log-the-confusion-matrix-results" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Log your custom test** + +::: {.f5} +Use the [`.log()` method](/validmind/validmind/vm_models.qmd#log){target="_blank"} to send the results of your custom test to the {{< var validmind.platform >}}: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. Run the cell in the following section under Implementing a custom inline test: **Log the confusion matrix results** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + +# Use external test providers {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Reusable custom tests + +
Try it **live** on the next pages. {{< fa hand-point-right >}} + +::: + +::: {.column width="70%" .bl .pl4 .f3} +### External test providers + +Creating inline custom tests with a function is a great way to customize your model documentation. + +- However, sometimes you may want to reuse the same set of tests across multiple models and share them with others in your organization. +- In this case, you can create an external custom test provider that will allow you to load custom tests from a local folder or a Git repository. + +::: +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#create-custom-tests-folder" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Create custom tests folder** + +::: {.f5} +Create a new folder that will contain reusable custom tests from your existing inline tests: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. Run the cell under the following Using external test providers section: **Create custom tests folder** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#save-an-inline-test" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Save inline test** + +::: {.f5} +The `@vm.test` decorator also includes a convenience method that allows you to save the test to a Python file at a specified path: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. 
Run all the cells under the Using external test providers section: **Save an inline test** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/3-integrate_custom_tests.html#register-a-local-test-provider" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Register local test provider** + +::: {.f5} +Next, let's initialize a test provider that will tell the {{< var validmind.developer >}} where to find your saved custom tests: + +1. Continue with **3 — Integrate custom tests**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"} +2. Run all the cells under the Using external test providers section: **Register a local test provider** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {.scrollable} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Custom test results + +::: {.f5 .nt2 .pl2 .mb4} +(Scroll down for the full instructions.) +::: + +::: {.tc} +[Learn more ...](/notebooks/EXECUTED/model_development/3-integrate_custom_tests.ipynb#add-test-results-to-documentation){.button target="_blank"} + +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}}
+
+:::
+
+::: {.column width="70%" .bl .pl4 .f4}
+### Add test results to model documentation
+
+With the custom test results logged, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation:
+
+1. From the **{{< fa cubes >}} Inventory** in the {{< var validmind.platform >}}, go to the model you connected to earlier.
+
+2. In the left sidebar that appears for your model, click **{{< fa book-open >}} Documentation**.
+
+3. Locate the Model Development section and click on **3.2 Model Evaluation** to expand that section.
+
+4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block.
+
+5. Click **+** and then select **Test-Driven Block**:
+
+    - In the search bar, type in `ConfusionMatrix`.
+    - Select the custom `ConfusionMatrix` tests you logged above:
+
+    ![The ConfusionMatrix tests selected](/notebooks/EXECUTED/model_development/selecting-confusion-matrix-test.png){fig-alt="Screenshot showing the ConfusionMatrix tests selected" .screenshot}
+
+6. Finally, click **Insert 2 Test Results to Document** to add the test results to the documentation.
+
+    Confirm that the two individual results for the confusion matrix tests have been correctly inserted into section **3.2 Model Evaluation** of the documentation.
+
+:::
+::::
+
+
+## {background-iframe="https://app.prod.validmind.ai/model-inventory/" background-interactive="true" data-preload="yes"}
+
+:::: {.absolute bottom=15 .w-100 .f5 .tc .pl4 .overlay}
+**Insert custom test-driven blocks**
+
+::: {.f6}
+3.2 Model Evaluation — `my_custom_tests.ConfusionMatrix:test_dataset_normalized` / `my_test_provider.ConfusionMatrix`
+
+:::
+
+When you're done, click [{{< fa chevron-right >}}]() to continue. 
+ +:::: + + + +# Section 2 {background-color="#083E44" background-image="/assets/img/about-us-esphere.svg"} + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html" background-interactive="yes" data-preload="yes"} + +:::: {.absolute bottom=15 left=0 right=50 .w-100 .f4 .tc .pl4 .pr4 .overlay} +**4 — Finalize testing and documentation** + +::: {.f5} +This is the final notebook in our introductory series, which will walk you through wrapping custom test results into your documentation, as well as how to update the configuration for the entire model documentation template to suit your needs. +::: + +**Scroll through this notebook** to explore. When you are done, click [{{< fa chevron-right >}}]() to continue. + +:::: + +## Retrieve your code snippet + +:::: {.columns} + +::: {.column width="80%"} + + + +
+ +
+ +::: + +::: {.column width="20%"} + +::: {.f4} +As usual, let's connect back up to your model in the {{< var validmind.platform >}}: + +1. From the **{{< fa cubes >}} Inventory**, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Getting Started**. +3. Locate the code snippet and click **Copy snippet to clipboard**. + +::: + +::: + +:::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html#initialize-the-validmind-library" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Connect to your model** + +::: {.f5} +With your code snippet copied to your clipboard: + +1. Open **4 — Finalize testing and documentation**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"} +2. Run all the cells under the **Setting up** section. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +# Include custom test results {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html#reconnect-to-validmind" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Reconnect to {{< var vm.product >}}** + +::: {.f5} +After you insert test-driven blocks, changes should persist and become available every time you call the [`preview_template()` function](/validmind/validmind.qmd#preview_template){target="_blank"}: + +1. 
Continue with **4 — Finalize testing and documentation**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"} +2. Run all the cells under the **Reconnect to {{< var vm.product >}}** section. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html#include-custom-test-results" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Log custom test results** + +::: {.f5} +Now that your custom test IDs are part of your template, you can now run tests for an entire section and all additional custom tests will be loaded: + +1. Continue with **4 — Finalize testing and documentation**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"} +2. Run the cell under the **Include custom test results** section. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + +# Configure documentation templates {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Documentation template configuration + +::: {.tc} +[Learn more ...](/validmind/validmind/vm_models.qmd#get_default_config){.button target="_blank"} + +::: + +
Try it **live** on the next pages. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f3} +### `get_default_config()` + +The utility function `vm.get_test_suite().get_default_config()` will return the default configuration for the entire documentation template as a dictionary: + +- This configuration will contain all the test IDs and their default parameters. +- You can then modify this configuration as needed and pass it to `run_documentation_tests()` to run all tests in the documentation template if needed. +- You still have the option to continue running tests for one section at a time; `get_default_config()` simply provides a useful reference for providing default parameters to every test. + +::: +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html#documentation-template-configuration" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Retrieve default configuration** + +::: {.f5} +1. Continue with **4 — Finalize testing and documentation**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"} +2. Run the first cell under the **Documentation template configuration** section. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/4-finalize_testing_documentation.html#update-the-config" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Update template configuration** + +::: {.f5} +The default configuration does not assign any inputs to a test, but you can assign inputs to individual tests as needed: + +1. 
Continue with **4 — Finalize testing and documentation**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"} +2. Run all the cells under the following Documentation template configuration section: **Update the config** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + +# In summary {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Implementing custom tests + +::: + +::: {.column width="70%" .bl .pl4 .f3} +In this third module, you learned how to: + +- [x] Implement, run, and log custom inline tests +- [x] Register external test providers for reusable custom tests +- [x] Include custom test results in your documentation template +- [x] Configure your model's documentation template + +::: +:::: + +::: {.tc} +<br>
+Continue your model development journey with: +

+ +[Finalizing model documentation](finalizing-model-documentation.html){.button target="_blank"} +::: \ No newline at end of file diff --git a/site/training/developer-fundamentals/learning-to-run-tests.qmd b/site/training/developer-fundamentals/learning-to-run-tests.qmd new file mode 100644 index 0000000000..33e14d96f0 --- /dev/null +++ b/site/training/developer-fundamentals/learning-to-run-tests.qmd @@ -0,0 +1,577 @@ +--- +title: "Learning to
Run Tests" +subtitle: "Developer Fundamentals — Module 2 of 4

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" +lightbox: true +format: + revealjs: + controls: true + controls-tutorial: true + help: true + controls-back-arrows: visible + transition: slide + theme: [default, ../assets/slides.scss] + slide-number: true + chalkboard: false + preview-links: auto + view-distance: 2 + logo: /validmind.png + footer: "{{< var validmind.training >}} | [Home {{< fa person-walking-dashed-line-arrow-right >}}](/training/training.qmd)" + html: + # Change this to the file name prepended by a _ to get around the global HTML output settings required by _metadata.yml + output-file: _learning-to-run-tests.html + search: false +title-slide-attributes: + data-background-color: "#083E44" + data-background-image: "../assets/home-hero.svg" +skip_preview: true +--- + +# Learning objectives {.center} + +_"As a **developer** who has registered a model with {{< var vm.product >}}, I want to identify relevant tests to run from {{< var vm.product >}}'s test repository, run and log tests for my model, and insert the test results into my model's documentation."_ + +::: {.tc} +
+This second module is part of a four-part series: +

+[Developer Fundamentals](/training/developer-fundamentals/developer-fundamentals-register.qmd){.button target="_blank"} +::: + + +## Module 2 — Contents {.center} + +::: {.f2} +1. [{{< var vm.product >}} for model development](#validmind-for-model-development) +2. [Explore ValidMind tests](#explore-validmind-tests) +3. [Run tests with the {{< var validmind.developer >}}](#run-validmind-tests) +4. [Log tests to the {{< var validmind.platform >}}](#log-validmind-tests) +5. [Test an existing model](#test-an-existing-model) + +::: + +First, let's make sure you can log in to {{< var vm.product >}}. + +{{< include /training/assets/_revealjs-navigation.qmd >}} + +## Before you begin {.center} + +::: {.panel-tabset} + +### Prerequisite course + +To continue, you need to have been [onboarded](developer-fundamentals-register.qmd#register){target="_blank"} onto {{< var validmind.training >}} with the [**{{< fa code >}} Developer**]{.bubble} role and completed the first module of this course: + +::: {.tc} + +[Using {{< var vm.product >}} for model development](using-validmind-for-model-development.html){.button target="_blank"} +::: + +:::: {.tc .mt5 .f2 .embed} +Already logged in and refreshed this module? Click [{{< fa chevron-right >}}]() to continue. + +::: + +### Log in + +1. Log in to check your access: + +:::: {.flex .flex-wrap .justify-around} + +::: {.w-50-ns .tc} + +[Log in to JupyterHub](https://jupyterhub.validmind.ai/){.button target="_blank"} + +::: + +::: {.w-50-ns .tc} +[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"} +::: + +:::: + + +::: {.tc .f3} +Be sure to return to this page afterwards. +::: + +2. 
After you successfully log in, refresh the page to connect this training module up to the {{< var validmind.platform >}}: + +::: {.tc} + + +::: + + +::: + + + + +# ValidMind for model development {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Jupyter Notebook series + +::: {.f3} +When you run these notebooks, they will generate a draft of model documentation and upload it to {{< var vm.product >}}, complete with test supporting test results. + +::: {.f5 .nt2 .pl2 .mb4} +
+You will need to have already completed **1 — Set up the {{< var validmind.developer >}}** during the first module to proceed. +::: + +::: +::: + +::: {.column width="70%" .bl .pl4 .f3} +### {{< var vm.product >}} for model development + +Our series of four introductory notebooks for model developers include sample code and how-to information to get you started with {{< var vm.product >}}: + +1 — [Set up the {{< var validmind.developer >}}](/notebooks/tutorials/model_development/1-set_up_validmind.ipynb){target="_blank"}
+2 — [Start the model development process](/notebooks/tutorials/model_development/2-start_development_process.ipynb){target="_blank"}
+3 — [Integrate custom tests](/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"}
+4 — [Finalize testing and documentation](/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"}
+
+ +::: {.f4 .pl3 .pr3 .embed} +In this second module, we'll run through **2 — Start the model development process** together. +::: + +::: +:::: + +Let's continue our journey with **2 — Start the model development process** on the next page. {{< fa hand-point-right >}} + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html" background-interactive="yes" data-preload="yes"} + +:::: {.absolute bottom=15 left=0 right=50 .w-100 .f4 .tc .pl4 .pr4 .overlay} +**2 — Start the model development process** + +::: {.f5} +During this course, we'll run through these notebooks together, and at the end of your learning journey you'll have a fully documented sample model ready for review. + +::: + +For now, **scroll through this notebook** to explore. When you are done, click [{{< fa chevron-right >}}]() to continue. + +:::: + + + + +# Explore ValidMind tests {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {background-iframe="/developer/model-testing/test-descriptions.html" background-interactive="true" data-preload="yes"} + +::: footer +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f3 .tc .pl4 .overlay} +**{{< var vm.product >}} test repository** + +::: {.f4} +{{< var vm.product >}} provides a wealth out-of-the-box of tests to help you ensure that your model is being built appropriately. + +In this module, you'll become familiar with the individual tests available in {{< var vm.product >}}, as well as how to run them and change parameters as necessary. + +::: + +For now, **scroll through these test descriptions** to explore. When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: +::: + +## Get your code snippet + +:::: {.columns} + +::: {.column width="80%"} + + + +
+ +
+ +::: + +::: {.column width="20%" .f4} + +::: {.f5} +{{< var vm.product >}} generates a unique *code snippet* for each registered model to connect with your developer environment: + +1. From the **{{< fa cubes >}} Inventory**, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Getting Started**. +3. Locate the code snippet and click **Copy snippet to clipboard**. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +::: + +:::: + +:::: {.tc .f6 .embed} +**Can't load the {{< var validmind.platform >}}?** + +Make sure you're logged in and have refreshed the page in a Chromium-based web browser. + +::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#initialize-the-validmind-library" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Connect to your model** + +::: {.f5} +With your code snippet copied to your clipboard: + +1. Open **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the following cells in the Setting up section:
**Initialize the {{< var validmind.developer >}}** / **Import sample dataset**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#identify-qualitative-tests" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Identify qualitative tests** + +::: {.f5} +Next, we'll use the [`list_tests()` function](/notebooks/EXECUTED/model_development/1-set_up_validmind.ipynb#explore-available-tests){target="_blank"} to pinpoint tests we want to run: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run all the cells under the Setting up section: **Identify qualitative tests** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. +:::: + + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#initialize-the-validmind-datasets" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Initialize {{< var vm.product >}} datasets** + +::: {.f5} +Then, we'll use the [`init_dataset()` function](/validmind/validmind.qmd#init_dataset){target="_blank"} to connect the sample data with a {{< var vm.product >}} `Dataset` object in preparation for running tests: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. 
Run the following cell in the Setting up section: **Initialize the {{< var vm.product >}} datasets** + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + + + +# Run ValidMind tests {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#run-tabular-data-tests" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Run tabular data tests** + +::: {.f5} +You run individual tests by calling the [`run_test` function](/validmind/validmind/tests.qmd#run_test){target="_blank"} provided by the `validmind.tests` module: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run all the cells under the Running tests section: **Run tabular data tests**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#utilize-test-output" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Utilize test output** + +::: {.f5} +You can utilize the output from a ValidMind test for further use, for example, if you want to remove highly correlated features: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run all the cells under the Running tests section: **Utilize test output**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. 
+ +:::: + + + + +# Log ValidMind tests {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Document test results + +::: {.tc} +[Learn more ...](/validmind/validmind/vm_models.qmd#log){.button target="_blank"} + +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f3} +Every test result returned by the `run_test()` function has a `.log()` method that can be used to send the test results to the {{< var validmind.platform >}}: + +- When using `run_documentation_tests()`, documentation sections will be automatically populated with the results of all tests registered in the documentation template. +- When logging individual test results to the platform, you'll need to manually add those results to the desired section of the model documentation. + +::: +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#run-and-log-multiple-tests" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Run & log multiple tests** + +::: {.f5} +The [`run_documentation_tests()` function](/validmind/validmind.qmd#run_documentation_tests){target="_blank"} allows you to run multiple tests at once and automatically log the results to your documentation: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the following cell in the Documenting results section: **Run and log multiple tests**. +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#run-and-log-an-individual-test" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Run & log an individual test** + +::: {.f5} +Next, we'll run an individual test and log the result to the {{< var validmind.platform >}}: + +1. 
Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the following cell in the Running tests section: **Run and log an individual test**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Work with test results + +::: {.tc} +[Learn more ...](/notebooks/EXECUTED/model_development/2-start_development_process.ipynb#add-individual-test-results-to-model-documentation){.button target="_blank"} + +::: + +
Try it **live** on the next page. {{< fa hand-point-right >}} +::: + +::: {.column width="70%" .bl .pl4 .f4} +### Add individual test results to model documentation + +With the test results logged, let's head to the model we connected to at the beginning of this notebook and insert our test results into the documentation: + +1. From the **{{< fa cubes >}} Inventory** in the {{< var validmind.platform >}}, go to the model you connected to earlier. + +2. In the left sidebar that appears for your model, click **Documentation**. + +3. Locate the Data Preparation section and click on **2.3 Correlations and Interactions** to expand that section. + +4. Hover under the Pearson Correlation Matrix content block until a horizontal dashed line with a **+** button appears, indicating that you can insert a new block. + +5. Click **+** and then select **Test-Driven Block**: + + - In the search bar, type in `HighPearsonCorrelation`. + - Select `HighPearsonCorrelation:balanced_raw_dataset` as the test. + +6. Finally, click **Insert 1 Test Result to Document** to add the test result to the documentation. + + Confirm that the individual results for the high correlation test has been correctly inserted into section **2.3 Correlations and Interactions** of the documentation. + +::: +:::: + + +## {background-iframe="https://app.prod.validmind.ai/model-inventory/" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f5 .tc .pl4 .overlay} +**Insert a test-driven block** + +::: {.f6} +2.3 Correlations and Interactions — `HighPearsonCorrelation:balanced_raw_dataset` + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. 
+ +:::: + + +# Test an existing model {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="50%" .pr4 .f2} +Model testing with {{< var vm.product >}} + +::: + +::: {.column width="50%" .bl .pl4 .f2} +Try it **live** on the next pages. {{< fa hand-point-right >}} + +::: +:::: + +::: {.f3} +
+So far, we’ve focused on the data assessment and pre-processing that usually occurs prior to any models being built. Now, let’s instead assume we have already built a model and we want to incorporate some model results into our documentation: + +::: {.panel-tabset .f4} + +### 1. Train your model + +Using {{< var vm.product >}} tests, we’ll train a simple logistic regression model on our dataset and evaluate its performance by using the `LogisticRegression` class from the `sklearn.linear_model`. + +### 2. Initialize the model object + +The last step for evaluating the model’s performance is to initialize the {{< var vm.product >}} `Dataset` and `Model` objects in preparation for assigning model predictions to each dataset. + +### 3. Assign predictions + +Once the model has been registered you can assign model predictions to the training and test datasets. The `assign_predictions()` method from the `Dataset` object can link existing predictions to any number of models. + +### 4. Run the model evaluation tests +In this next example, we’ll focus on running the tests within the Model Development section of the model documentation. Only tests associated with this section will be executed, and the corresponding results will be updated in the model documentation. + +::: + +::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#train-simple-logistic-regression-model" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Train your model** + +::: {.f5} +Using {{< var vm.product >}} tests, we'll train a simple logistic regression model on our dataset and evaluate its performance: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. 
Run all the cells under the Model testing section: **Train simple logistic regression model**. +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#initialize-model-evaluation-objects" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Initialize a model object** + +::: {.f5} +Use the `init_dataset` and [`init_model` functions](/validmind/validmind.qmd#init_model){target="_blank"} to initialize these objects: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the cell under the following Model testing section: **Initialize model evaluation objects**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#assign-predictions" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Assign predictions** + +::: {.f5} +Use the [`assign_predictions()` method](/validmind/validmind/vm_models.qmd#assign_predictions){target="_blank"} from the `Dataset` object to link existing predictions to any number of models: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the cell under the following Model testing section: **Assign predictions**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. 
+ +:::: + +## {background-iframe="/notebooks/EXECUTED/model_development/2-start_development_process.html#run-the-model-evaluation-tests" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Run the model evaluation tests** + +::: {.f5} +Finally, we'll run only the tests within the Model Development section of the model documentation: + +1. Continue with **2 — Start the model development process**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/2-start_development_process.ipynb){target="_blank"} +2. Run the cell under the following Model testing section: **Run the model evaluation tests**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + + +# In summary {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Learning to run tests + +::: + +::: {.column width="70%" .bl .pl4 .f3} +In this second module, you learned how to: + +- [x] Identify relevant tests to run from {{< var vm.product >}}'s test repository +- [x] Initialize {{< var vm.product >}} `Dataset` and `Model` objects +- [x] Run out-of-the-box tests with the {{< var validmind.developer >}} +- [x] Log test results to the {{< var validmind.platform >}} +- [x] Insert logged test results into your model's documentation + +::: +:::: + +::: {.tc} +
+Continue your model development journey with: +

+ +[Implementing custom tests](implementing-custom-tests.html){.button target="_blank"} +::: \ No newline at end of file diff --git a/site/training/developer-fundamentals/using-validmind-for-model-development.qmd b/site/training/developer-fundamentals/using-validmind-for-model-development.qmd new file mode 100644 index 0000000000..f7fcdbf2d7 --- /dev/null +++ b/site/training/developer-fundamentals/using-validmind-for-model-development.qmd @@ -0,0 +1,367 @@ +--- +title: "Using {{< var vm.product >}}
for Model Development" +subtitle: "Developer Fundamentals — Module 1 of 4

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" +lightbox: true +format: + revealjs: + controls: true + controls-tutorial: true + help: true + controls-back-arrows: visible + transition: slide + theme: [default, ../assets/slides.scss] + slide-number: true + chalkboard: false + preview-links: auto + view-distance: 2 + logo: /validmind.png + footer: "{{< var validmind.training >}} | [Home {{< fa person-walking-dashed-line-arrow-right >}}](/training/training.qmd)" + html: + # Change this to the file name prepended by a _ to get around the global HTML output settings required by _metadata.yml + output-file: _using-validmind-for-model-development.html + search: false +title-slide-attributes: + data-background-color: "#083E44" + data-background-image: "../assets/home-hero.svg" +skip_preview: true +--- + +# Learning objectives {.center} + +_"As a **developer** who is new to {{< var vm.product >}}, I want to learn how to register a model in the {{< var validmind.platform >}}, install the {{< var validmind.developer >}} in my local environment, and preview my model's documentation template."_ + +::: {.tc} +
+This first module is part of a four-part series: +

+[Developer Fundamentals](/training/developer-fundamentals/developer-fundamentals-register.qmd){.button target="_blank"} +::: + + +## Module 1 — Contents {.center} + +::: {.f2} +1. [{{< var vm.product >}} for model development](#validmind-for-model-development) +2. [Register a model in the {{< var validmind.platform >}}](#register-a-model) +3. [Install the {{< var validmind.developer >}}](#install-the-validmind-library) +4. [Preview your model's documentation template](#preview-model-documentation) + +::: + +First, let's make sure you can log in to {{< var vm.product >}}. + +{{< include /training/assets/_revealjs-navigation.qmd >}} + +## Before you begin {.center} + +::: {.panel-tabset} + +### Registration + +To continue, you need to have been [onboarded](developer-fundamentals-register.qmd#register){target="_blank"} onto {{< var validmind.training >}} with the [**{{< fa code >}} Developer**]{.bubble} role. + +
+ +:::: {.tc .mt5 .f2 .embed} +Already logged in and refreshed this module? Click [{{< fa chevron-right >}}]() to continue. + +::: + + +### Log in + +1. Log in to check your access: + +:::: {.flex .flex-wrap .justify-around} + +::: {.w-50-ns .tc} + +[Log in to JupyterHub](https://jupyterhub.validmind.ai/){.button target="_blank"} + +::: + +::: {.w-50-ns .tc} +[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"} +::: + +:::: + + +::: {.tc .f3} +Be sure to return to this page afterwards. +::: + +2. After you successfully log in, refresh the page to connect this training module up to the {{< var validmind.platform >}}: + +::: {.tc} + + +::: + +::: + +
 + + + +# ValidMind for model development {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Jupyter Notebook series + +::: {.f3} +When you run these notebooks, they will generate a draft of model documentation and upload it to {{< var vm.product >}}, complete with supporting test results. + +::: +::: + +::: {.column width="70%" .bl .pl4 .f3} +### {{< var vm.product >}} for model development + +Our series of four introductory notebooks for model developers include sample code and how-to information to get you started with {{< var vm.product >}}: + +1 — [Set up the {{< var validmind.developer >}}](/notebooks/tutorials/model_development/1-set_up_validmind.ipynb){target="_blank"}<br>
+2 — [Start the model development process](/notebooks/tutorials/model_development/2-start_development_process.ipynb){target="_blank"}
+3 — [Integrate custom tests](/notebooks/tutorials/model_development/3-integrate_custom_tests.ipynb){target="_blank"}
+4 — [Finalize testing and documentation](/notebooks/tutorials/model_development/4-finalize_testing_documentation.ipynb){target="_blank"}
+
+ +::: {.f4 .pl3 .pr3 .embed} +In this first module, we'll run through **1 — Set up the {{< var validmind.developer >}}** together. +::: + +::: +:::: + +
+ +Let's start our journey with **1 — Set up the {{< var validmind.developer >}}** on the next page. {{< fa hand-point-right >}} + +## {background-iframe="/notebooks/EXECUTED/model_development/1-set_up_validmind.html" background-interactive="yes" data-preload="yes"} + +:::: {.absolute bottom=15 left=0 right=50 .w-100 .f4 .tc .pl4 .pr4 .overlay} +**1 — Set up the {{< var validmind.developer >}}** + +::: {.f5} +During this course, we'll run through these notebooks together, and at the end of your learning journey you'll have a fully documented sample model ready for review. +::: + +For now, **scroll through this notebook** to explore. When you are done, click [{{< fa chevron-right >}}]() to continue. + +:::: + + + +# Register a model {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {background-iframe="https://app.prod.validmind.ai" background-interactive="true" data-preload="yes"} + +::: {.fr .f4 .nr5 .pa5 .overlay} +**Welcome to the {{< var validmind.platform >}}** + +From here, you can: + +- Keep track of your models in the
customizable inventory ... +- Review and edit model documentation
generated via the {{< var validmind.developer >}} ... +- Collaborate with model validators
to get your model approved ... +- ... and much more! + +::: {.f5 .pl3 .pr3 .embed} +**Can't load the {{< var validmind.platform >}}?** + +Make sure you're logged in and have
refreshed the page. +::: + +When you're done navigating around, +
click [{{< fa chevron-right >}}]() to continue. + +::: + + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.fr .f4 .mv5 .nr5 .pa4 .overlay} +**Welcome to the model inventory** + +Use the model inventory to track
+comprehensive details for all your
+models throughout the model lifecycle. + +The model inventory is customizable
+and extensible, with a layout that
+can be configured to suit your needs. + +::: {.f5 .pl3 .pr3 .embed} +To start the documentation process,
+a model must already be registered
+in the model inventory via the
+**+ Register Model** modal. +::: + +**Let's register a model together** on
+the next page. {{< fa hand-point-right >}} + +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory/?register=open" background-interactive="true" data-preload="yes"} + +:::: {.fr .f4 .mv5 .nr5 .pa4 .overlay} +**Register a binary
classification model** + +1. Select the option
+for a new model: + +::: {.f5 .nt2 .pl2} +- **Documentation template** —
`Binary classification` +- **Use case** —
`Attrition/Churn Management` + + You can fill in other options
according to your preference. +::: + +2. Click **Register Model** to
+add the model to your
+inventory. + +When you're done,
click [{{< fa chevron-right >}}]() to continue. + +:::: + + + + +# Install the ValidMind Library {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## Get your code snippet + +:::: {.columns} + +::: {.column width="80%"} + + + +
+ +
+ +::: + +::: {.column width="20%" .f4} + +::: {.f5} +{{< var vm.product >}} generates a unique *code snippet* for each registered model to connect with your developer environment: + +1. From the **{{< fa cubes >}} Inventory**, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Getting Started**. +3. Locate the code snippet and click **Copy snippet to clipboard**. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +::: + +:::: + +:::: {.tc .f6 .embed} +**Can't load the {{< var validmind.platform >}}?** + +Make sure you're logged in and have refreshed the page in a Chromium-based web browser. + +::: + +## {background-iframe="/notebooks/EXECUTED/model_development/1-set_up_validmind.html#install-the-validmind-library" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Install & initialize the {{< var validmind.developer >}}** + +::: {.f5} +With your code snippet copied to your clipboard: + +1. Open **1 — Set up the {{< var validmind.developer >}}**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/1-set_up_validmind.ipynb){target="_blank"} +2. Run all the cells in the sections under **Initializing the {{< var validmind.developer >}}**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + + + + +# Preview model documentation {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {background-iframe="/notebooks/EXECUTED/model_development/1-set_up_validmind.html#preview-the-documentation-template" data-preload="yes"} + +:::: {.absolute bottom=15 .w-100 .f4 .tc .pl4 .overlay} +**Preview the documentation template** + +::: {.f5} +You can preview your model's documentation template right from the {{< var validmind.developer >}}: + +1. 
Continue with **1 — Set up the {{< var validmind.developer >}}**: [{{< fa square-arrow-up-right >}} JupyterHub](https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/1-set_up_validmind.ipynb){target="_blank"} +2. Run all the cells in the sections under **Getting to know ValidMind**. + +::: + +When you're done, return to this page and click [{{< fa chevron-right >}}]() to continue. + +:::: + +## {background-iframe="https://app.prod.validmind.ai/model-inventory" background-interactive="true" data-preload="yes"} + +:::: {.absolute bottom=0 left=50 right=50 .w-95 .f4 .tc .pl4 .overlay} +**Verify the documentation template** + +::: {.f5} +Once you've called `preview_template()`: + +1. From the model inventory, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Documentation**. +3. Note how the structure of the model documentation reflects the previewed template. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +:::: + +# In summary {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +Using {{< var vm.product >}} for model development + +::: + +::: {.column width="70%" .bl .pl4 .f3} +In this first module, you learned how to: + +- [x] Register a model in the {{< var validmind.platform >}} +- [x] Install the {{< var validmind.developer >}} in your environment +- [x] Connect to your registered model in the {{< var validmind.platform >}} +- [x] Preview and verify your model's documentation template + +::: +:::: + +::: {.tc} +
+Continue your model development journey with: +

+ +[Learning to run tests](learning-to-run-tests.html){.button target="_blank"} +::: \ No newline at end of file diff --git a/site/training/program/learning-paths.qmd b/site/training/program/learning-paths.qmd index 164ef65af0..4b9ec714cc 100644 --- a/site/training/program/learning-paths.qmd +++ b/site/training/program/learning-paths.qmd @@ -16,60 +16,39 @@ Develop role-based expertise for your entire organization. ### Developer Fundamentals -As a developer who is new to ValidMind, learn how to generate model documentation, add your own tests, edit the content, and then submit your documentation for approval. - -::: {.attn .hidden} :::: {.flex .flex-wrap .justify-around} -::: {.w-30-ns} -{{< fa check >}} **Course content** +::: {.w-80-ns} +Learn how to use {{< var vm.product >}} as a **developer** to generate model documentation, automate testing, and track your model's progress through the model lifecycle. -- Split 3 modules -> 4 modules -- Expand process overview content -- Expand template content to include editing templates and adding tests -- Rename "Module 2: Start Documenting & Testing" to "Module 2: Start Documenting" -- Add missing content for "Register a model" in Module 1 ::: -::: {.w-30-ns} -{{< fa check >}} **Videos ** - -- 10 videos -- Minor update to collaboration video (multi-person comment threads) -::: - -::: {.w-30-ns .pl4 .bl} -**Notes** +::: {.w-20-ns .tc} +[register now](/training/developer-fundamentals/developer-fundamentals-register.qmd){.button-green .cta target="_blank"} -- FUTURE: 1 intro notebook -> 4 notebooks that progressively build on each other ::: :::: -::: ::: {.attn} :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1
Initialize the ValidMind Library +#### Module 1
Using {{< var vm.product >}} for Model Development -- Install the ValidMind Library -- Initialize your development environment -- Register a model -- Get your code snippet -- Work with documentation templates +- Register a model in the {{< var validmind.platform >}} +- Install the {{< var validmind.developer >}} in your environment +- Connect to your registered model +- Preview your model's documentation template ::: ::: {.w-50-ns} -#### Module 2
Start Documenting & Testing +#### Module 2
Learning to Run Tests -- Initialize your datasets -- Run tabular data tests -- Utilize test output -- Document results based on datasets -- Test your model - -- Run the model evaluation tests +- Identify relevant tests to run from {{< var vm.product >}}'s test vault +- Initialize datasets & models for use with the {{< var validmind.developer >}} +- Run and log out-of-the-box tests for new and existing models +- Insert test results into your model's documentation ::: :::: @@ -77,22 +56,19 @@ As a developer who is new to ValidMind, learn how to generate model documentatio :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 3
Implement Custom Tests +#### Module 3
Implementing Custom Tests -- Create a confusion matrix plot -- Add parameters to custom tests -- Pass parameters to custom tests -- Log the confusion matrix results -- Using external test providers -- Initializing a local test provider +- Implement, run, and log custom inline and reusable tests +- Refresh your template to include inserted test-driven blocks +- Configure your template to include custom tests ::: ::: {.w-50-ns} -#### Module 4
Finalize Testing & Documentation +#### Module 4
Finalizing Model Documentation -- Include custom test results in your documentation -- Viewing and updating the configuration for the entire model documentation template -- Update the config +- Refine your model documentation +- Submit your model documentation for approval +- Track changes and other updates to your model ::: :::: @@ -100,58 +76,41 @@ As a developer who is new to ValidMind, learn how to generate model documentatio ### Validator Fundamentals -As a validator who is new to ValidMind, learn how to review model documentation, prepare your validation report, track issues, and submit your report for approval. - -::: {.attn .hidden} - :::: {.flex .flex-wrap .justify-around} -::: {.w-30-ns} -{{< fa check >}} **Course content** +::: {.w-80-ns} +Learn how to use {{< var vm.product >}} as a **validator** to generate validation reports, automate testing, and collaborate with your model development team. -- Split 2 modules -> 3 modules -- Add missing content: - - Preview templates for validation reports - - How to work with validation reports - - Expand process overview content ::: -::: {.w-30-ns} -{{< fa minus >}} **Video** +::: {.w-20-ns .tc} +[register now](/training/validator-fundamentals/validator-fundamentals-register.qmd){.button-green .cta target="_blank"} -- 5 videos -- Replace stale '101' intro -- Review for stale content -::: - -::: {.w-30-ns} - ::: :::: -::: ::: {.attn} :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1
Reviewing & Analyzing Models +#### Module 1
Using {{< var vm.product >}} for Model Validation + +- Install the {{< var validmind.developer >}} in your environment +- Connect to a champion model as a validator +- Preview the model's validation report template +- Review submitted model documentation -- Access validation reports -- Preview templates for validation reports -- How to work with validation reports ::: ::: {.w-50-ns} -#### Module 2
Reviewing & Analyzing Models +#### Module 2
Running Data Quality Tests + +- Identify relevant tests to run from {{< var vm.product >}}'s test vault +- Initialize datasets for use with the {{< var validmind.developer >}} +- Run and log out-of-the-box tests on your datasets +- Insert test results into your model’s validation report -- Reviewing model documentation -- Analyzing test results -- Add findings -- Link evidence -- Assessing compliance ::: :::: @@ -159,13 +118,20 @@ As a validator who is new to ValidMind, learn how to review model documentation, :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 3
Issue Resolution & Submission +#### Module 3
Developing Challenger Models + +- Initialize models for use with the {{< var validmind.developer >}} +- Run and log out-of-the box and custom tests for your models +- Use the results of tests to log model findings -- Tracking issue resolution -- Submitting validation reports for approval ::: ::: {.w-50-ns} +#### Module 4
Finalizing Validation Reports + +- Make qualitative edits to your validation report +- Submit your validation report for approval +- Track finding resolution and other updates to your model ::: @@ -174,7 +140,19 @@ As a validator who is new to ValidMind, learn how to review model documentation, ### Administrator Fundamentals -As an administrator who is new to ValidMind, learn how to onboard your organization, manage users and roles, and manage permissions for specific roles. +:::: {.flex .flex-wrap .justify-around} + +::: {.w-80-ns} +Learn how to use {{< var vm.product >}} as an **administrator** to onboard your organization, manage users, roles, and permissions, and customize the {{< var vm.platform >}} for your institution's requirements. + +::: + +::: {.w-20-ns .tc} +[register now](/training/administrator-fundamentals/administrator-fundamentals-register.qmd){.button-green .cta target="_blank"} + +::: + +:::: ::: {.attn .hidden} @@ -211,7 +189,7 @@ As an administrator who is new to ValidMind, learn how to onboard your organizat :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1
Onboarding +#### Module 1
{{< var validmind.platform >}} Onboarding - Setting up an organization - Inviting users via email @@ -229,14 +207,14 @@ As an administrator who is new to ValidMind, learn how to onboard your organizat :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 3
Workflows +#### Module 3
Lifecycle Statuses & Workflows -- Configuring workflows +- Configuring statuses & workflows - Assigning users to workflows ::: ::: {.w-50-ns} -#### Module 4
Templates & Model Inventory +#### Module 4
Templates & Inventory Fields - Customize templates - Configure model inventory fields @@ -247,7 +225,20 @@ As an administrator who is new to ValidMind, learn how to onboard your organizat ### Monitoring Fundamentals
[coming soon]{.smallercaps .pink} -As a monitoring user who is new to ValidMind, learn how to generate ongoing monitoring documentation, schedule monitoring runs, and review the results. +:::: {.flex .flex-wrap .justify-around} + +::: {.w-80-ns} +Learn how to use {{< var vm.product >}} to manage the **ongoing monitoring** of your models with ongoing monitoring plans, scheduled monitoring runs, and tracked metrics over time. + +::: + +::: {.w-20-ns .tc} +[coming soon]{.button .cta target="_blank"} + +::: + +:::: + ::: {.attn .hidden} :::: {.flex .flex-wrap .justify-around} @@ -278,7 +269,7 @@ As a monitoring user who is new to ValidMind, learn how to generate ongoing moni :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1
Ongoing Monitoring of Models +#### Module 1
Using {{< var vm.product >}} for Ongoing Monitoring - How monitoring integrates with model documentation - Enable monitoring for a model @@ -286,7 +277,7 @@ As a monitoring user who is new to ValidMind, learn how to generate ongoing moni ::: ::: {.w-50-ns} -#### Module 2
Monitoring Setup +#### Module 2
Setting Up Monitoring - Load the reference and monitoring datasets - Load the production model @@ -315,11 +306,24 @@ As a monitoring user who is new to ValidMind, learn how to generate ongoing moni ## Standalone courses -Useful skills for anyone who wants to use ValidMind. +Useful skills for anyone who wants to use {{< var vm.product >}}. ### {{< var vm.product >}} Guided Tour
[coming soon]{.smallercaps .pink} -As a new user of ValidMind, learn how to navigate the platform's key features and understand how they work together to support AI governance and compliance with regulations like the EU AI Act. +:::: {.flex .flex-wrap .justify-around} + +::: {.w-80-ns} +Learn how to navigate {{< var vm.product >}}'s key features and understand how they work together to support AI governance and compliance with regulations like the EU AI Act. + +::: + +::: {.w-20-ns .tc} +[coming soon]{.button .cta target="_blank"} + +::: + +:::: + ::: {.attn .hidden} @@ -354,7 +358,7 @@ As a new user of ValidMind, learn how to navigate the platform's key features an :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1<br>
Getting Started with the Model Inventory +#### Module 1
The {{< var vm.product >}} Model Inventory - Understand how the model inventory serves as your central hub for registered models - Learn to register new models with appropriate templates @@ -363,7 +367,7 @@ As a new user of ValidMind, learn how to navigate the platform's key features an ::: ::: {.w-50-ns} -#### Module 2
Model Documentation and Testing +#### Module 2
Model Documentation & Testing - Navigate the documentation templates and structure - Explore the library of 250+ out-of-the-box tests @@ -376,7 +380,7 @@ As a new user of ValidMind, learn how to navigate the platform's key features an :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 3
Validation and Collaboration +#### Module 3
Validation & Collaboration - Create and manage validation reports - Learn to assess compliance and link evidence @@ -385,7 +389,7 @@ As a new user of ValidMind, learn how to navigate the platform's key features an ::: ::: {.w-50-ns} -#### Module 4
Monitoring and Governance +#### Module 4
Monitoring & Governance - Set up ongoing monitoring for production models - Track model performance and identify data drift @@ -398,7 +402,20 @@ As a new user of ValidMind, learn how to navigate the platform's key features an ### {{< var vm.product >}} Content Features
[coming soon]{.smallercaps .pink} -As a content author who is new to ValidMind, learn how to use the content features to enhance my documentation, and to enable or disable text generation with GenAI. +:::: {.flex .flex-wrap .justify-around} + +::: {.w-80-ns} +Learn how to use {{< var vm.product >}}'s content features to enhance documentation, and to enable or disable text generation with GenAI. + +::: + +::: {.w-20-ns .tc} +[coming soon]{.button .cta target="_blank"} + +::: + +:::: + ::: {.attn .hidden} @@ -430,14 +447,14 @@ As a content author who is new to ValidMind, learn how to use the content featur :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns} -#### Module 1
Using text generation +#### Module 1
Using Text Generation - When to use GenAI to draft content - How to turn GenAI on and off ::: ::: {.w-50-ns} -#### Module 2
Enhancing content +#### Module 2
Enhancing Content - How to edit content - Write optional assessment prompts diff --git a/site/training/program/program-overview.qmd b/site/training/program/program-overview.qmd index 762f85adf6..1ba3adccd4 100644 --- a/site/training/program/program-overview.qmd +++ b/site/training/program/program-overview.qmd @@ -20,7 +20,7 @@ listing: - sample-training-plan.qmd --- -Empower your team with ValidMind's comprehensive training program, designed to accelerate adoption and maximize value. +Empower your team with {{< var vm.product >}}'s comprehensive training program, designed to accelerate adoption and maximize value. ::: {.attn} ## {{< fa check >}} Training that delivers @@ -35,7 +35,7 @@ Our training program is built on three pillars: ## Invest in your success -ValidMind's training program ensures your organization can: +{{< var vm.product >}}'s training program ensures your organization can: - Accelerate time-to-value with structured learning paths - Build internal expertise across key roles @@ -134,6 +134,6 @@ Our training develops competency across your entire model risk management team: ::: {.feature} ## Get started -Ready to begin your ValidMind training journey? Contact your customer success representative to create a customized training plan for your organization. +Ready to begin your {{< var vm.product >}} training journey? Contact your customer success representative to create a customized training plan for your organization. ::: diff --git a/site/training/program/sample-training-plan.qmd b/site/training/program/sample-training-plan.qmd index 03162919a7..5701dcefaa 100644 --- a/site/training/program/sample-training-plan.qmd +++ b/site/training/program/sample-training-plan.qmd @@ -4,7 +4,7 @@ sidebar: validmind-training description: "Sample 3-week training plan for enterprise deployment with enhanced support options." 
--- -This enterprise training plan demonstrates how ValidMind can be deployed across a large organization using both standard and enhanced training options. The plan includes comprehensive role-based training, custom workshops, and ongoing support to ensure successful adoption. +This enterprise training plan demonstrates how {{< var vm.product >}} can be deployed across a large organization using both standard and enhanced training options. The plan includes comprehensive role-based training, custom workshops, and ongoing support to ensure successful adoption. ## Timeline overview @@ -64,7 +64,7 @@ This enterprise training plan demonstrates how ValidMind can be deployed across | Module | Format | Duration | Delivery | |--------|---------|----------|-----------| -| ValidMind 101 | Live + Labs | 2 hours | Regional tracks | +| {{< var vm.product >}} 101 | Live + Labs | 2 hours | Regional tracks | | Developer foundations | Interactive | 4 hours | Role-specific | | Validator essentials | Interactive | 4 hours | Role-specific | | Admin core skills | Interactive | 4 hours | Role-specific | @@ -125,6 +125,6 @@ This enterprise training plan demonstrates how ValidMind can be deployed across 3. Complete pre-training assessments 4. Begin foundation phase rollout -Contact your ValidMind representative to customize this plan for your organization's specific needs and timeline requirements. +Contact your {{< var vm.product >}} representative to customize this plan for your organization's specific needs and timeline requirements. ::: \ No newline at end of file diff --git a/site/training/program/training-faq.qmd b/site/training/program/training-faq.qmd index 481897b9c4..e0ba5ebfe6 100644 --- a/site/training/program/training-faq.qmd +++ b/site/training/program/training-faq.qmd @@ -3,7 +3,7 @@ title: "Where do I find … ?" sidebar: validmind-training --- -Here's what we have been asked about ValidMind during training sessions. 
+Here's what we have been asked about {{< var vm.product >}} during training sessions. ## Answers by topic diff --git a/site/training/training-templates/course-registration.qmd b/site/training/training-templates/course-registration.qmd index 5e9adc58f5..9212ca3907 100644 --- a/site/training/training-templates/course-registration.qmd +++ b/site/training/training-templates/course-registration.qmd @@ -4,9 +4,49 @@ subtitle: For {{< var vm.product >}} date: last-modified # REMOVE THE SEARCH FALSE TOGGLE WHEN THE COURSE IS READY TO BE PUBLISHED search: false +listing: + - id: modules + type: grid + grid-columns: 2 + max-description-length: 500 + sort: false + table-hover: true + image-align: left + contents: + # IMPORTANT: USE THE .HTML PATH AND NOT THE .QMD PATH FOR THE REVEALJS OUTPUT + - path: course-slides.html + title: "Title one" + subtitle: "Module 1" + description: "{{< fa check >}} Bullet 1
{{< fa check >}} Bullet 2
{{< fa check >}} Bullet 3
{{< fa check >}} Bullet 4
{{< fa check >}} Bullet 5" + reading-time: "30" + author: "{{< var vm.product >}}" + - path: course-slides.html + title: "Title two" + subtitle: "Module 2" + description: "{{< fa check >}} Bullet 1
{{< fa check >}} Bullet 2
{{< fa check >}} Bullet 3
{{< fa check >}} Bullet 4
{{< fa check >}} Bullet 5" + reading-time: "30" + author: "{{< var vm.product >}}" + - path: course-slides.html + title: "Title 3" + subtitle: "Module 3" + description: "{{< fa check >}} Bullet 1
{{< fa check >}} Bullet 2
{{< fa check >}} Bullet 3
{{< fa check >}} Bullet 4
{{< fa check >}} Bullet 5" + reading-time: "30" + author: "{{< var vm.product >}}" + - path: course-slides.html + title: "Title 4" + subtitle: "Module 4" + description: "{{< fa check >}} Bullet 1
{{< fa check >}} Bullet 2
{{< fa check >}} Bullet 3
{{< fa check >}} Bullet 4
{{< fa check >}} Bullet 5" + reading-time: "30" + author: "{{< var vm.product >}}" + fields: [title, subtitle, description, reading-time] --- -As a {{ role }} new to {{< var vm.product >}}, learn how to {{ course_overview }}. +Learn how to use {{< var vm.product >}} as a {{ role }}, to {{ list_tasks }}. + +::: {.column-margin} +{{< include /training/_compatibility.qmd >}} + +::: ::: {.attn} @@ -15,31 +55,23 @@ As a {{ role }} new to {{< var vm.product >}}, learn how to {{ course_overview } :::: {.flex .flex-wrap .justify-around} ::: {.w-50-ns .pr3} -### Learn how to - -- {{ learning_point_1 }} -- {{ learning_point_2 }} -- {{ learning_point_3 }} -- {{ learning_point_4 }} -- {{ learning_point_5 }} -- {{ learning_point_6 }} - -#### Duration +#### Total duration {{ duration }} minutes -[register](#register){.button-green .cta} ::: -::: {.w-50-ns .pa3} +::: {.w-50-ns .pa3 .tr} -::: {.preview source="course-slides.qmd"} -::: +[register now](#register){.button-green .cta} ::: :::: +:::{#modules} +::: + ::: diff --git a/site/training/training-templates/course-slides.qmd b/site/training/training-templates/course-slides.qmd index c4e7887bcc..5e0862f273 100644 --- a/site/training/training-templates/course-slides.qmd +++ b/site/training/training-templates/course-slides.qmd @@ -1,6 +1,6 @@ --- -title: "{{ Topic }}
Template" -subtitle: "docs.validmind.ai/training

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" +title: "{{ Module }}
Template" +subtitle: "{{ Course name }} — Module # of #<br>

_Click [{{< fa chevron-right >}}](#learning-objectives) to start_" lightbox: true # REMOVE THE SEARCH FALSE TOGGLE WHEN THE COURSE IS READY TO BE PUBLISHED search: false @@ -28,11 +28,33 @@ title-slide-attributes: skip_preview: true --- -# Learning objectives +# Learning objectives {.center} -_"As a {{ role }} who is new to {{< var vm.product >}}, I want to learn how to {{ task A }}, {{ task B }}, {{ task C }}, and {{ task D}}."_ +_"As a {{ role }} who ... {{< var vm.product >}}, I want to learn how to {{ task A }}, {{ task B }}, {{ task C }}, and {{ task D}}."_ -## In this course +::: {.tc} +
+This {{#th}} module is part of a {{#}}-part series: +

+ +[Course Link](/training/training-templates/course-registration.qmd){.button target="_blank"} +::: + +## In this course {.center} + +:::: {.columns .f4} +::: {.column width="50%" .mt4 .pr4} +### Introduction +- [Before you begin](#before-you-begin) + +::: + +:::: + +:::: {.columns .f4} + +::: {.column width="50%" .mt4 .pr4} +### Section 1 1. [iFrame embed right](#iframe-embed-right) 2. [iFrame embed bottom](#iframe-embed-bottom) @@ -40,27 +62,78 @@ _"As a {{ role }} who is new to {{< var vm.product >}}, I want to learn how to { 4. [Scrollable single user guide](#scrollable-single-user-guide) 5. [Scrollable tabset user guides](#scrollable-tabset-user-guide) +::: + +::: {.column width="50%" .mt4} +### Section 2 + +Example embedded `iframe` for interactive copy/paste workaround: + +- [Get your code snippet](#get-your-code-snippet) + +Topic summary & next steps: + +- [Next steps](#next-steps) +::: + +:::: + First, let's make sure you can log in to {{< var vm.product >}}. {{< include /training/assets/_revealjs-navigation.qmd >}} -## Can you log in? +## Before you begin {.center} - +::: {.panel-tabset} -To try out this course, you need to have been [onboarded](course-registration.qmd#register) onto {{< var validmind.training >}} with the [**{{< fa check >}} Role Name**]{.bubble} role. +### Registration -
Log in to check your access: + -::: {.tc} -[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai/){.button target="_blank"} +To continue, you need to have been [onboarded](course-registration.qmd#register){target="_blank"} onto {{< var validmind.training >}} with the [**{{< fa check >}} Role Name**]{.bubble} role.
+ +:::: {.tc .mt5 .f2 .embed} +Already logged in and refreshed this module? Click [{{< fa chevron-right >}}]() to continue. + +::: + +### Log in + +1. Log in to check your access: + +:::: {.flex .flex-wrap .justify-around} + +::: {.w-50-ns .tc} + +[Log in to JupyterHub](https://jupyterhub.validmind.ai/){.button target="_blank"} + +::: + +::: {.w-50-ns .tc} +[Log in to {{< var vm.product >}}](https://app.prod.validmind.ai){.button target="_blank"} +::: + +:::: + + +::: {.tc .f3} Be sure to return to this page afterwards. +::: + +2. After you successfully log in, refresh the page to connect this training module up to the {{< var validmind.platform >}}: + +::: {.tc} + ::: +::: + +# Section 1 {background-color="#083E44" background-image="/assets/img/about-us-esphere.svg"} + # iFrame embed right {background-color="#083E44" background-image="/training/assets/home-hero.svg"} ## {background-iframe="https://app.prod.validmind.ai/" data-preload="yes"} @@ -156,11 +229,11 @@ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed quis sapien blandit # Scrollable tabset user guides {background-color="#083E44" background-image="/training/assets/home-hero.svg"} -## {.scrollable} +## {.scrollable .center} :::: {.columns} ::: {.column width="30%" .pr4 .f2} -You can also embed several sets of instructions into the same two-column layout. +You can also embed several sets of instructions into the same two-column layout, and vertically center content. ::: {.tc} @@ -191,16 +264,86 @@ All you need to do is make sure that the headers within `.panel-tabset` are set Try it **live** on the next page. {{< fa hand-point-right >}} +# Section 2 {background-color="#083E44" background-image="/assets/img/about-us-esphere.svg"} -# Topic summary slide +## Get your code snippet -Summarize the benefits of {{< var vm.product >}} for the role or topic you just covered here, then link to the relevant user guides below. +:::: {.columns} -::: {.tc} - +::: {.column width="80%"} + + + +
+ +
+ +::: + +::: {.column width="20%" .f4} + +::: {.f5} +{{< var vm.product >}} generates a unique *code snippet* for each registered model to connect with your developer environment: + +1. From the **{{< fa cubes >}} Inventory**, select the name of your model to open up the model details page. +2. On the left sidebar that appears for your model, click **Getting Started**. +3. Locate the code snippet and click **Copy snippet to clipboard**. + +::: + +When you're done, click [{{< fa chevron-right >}}]() to continue. + +::: + +:::: + +:::: {.tc .f6 .embed} +**Can't load the {{< var validmind.platform >}}?** + +Make sure you're logged in and have refreshed the page in a Chromium-based web browser. + +::: + +# In summary {background-color="#083E44" background-image="/training/assets/home-hero.svg"} + +## {.scrollable .center} + +:::: {.columns} +::: {.column width="30%" .pr4 .f2} +{{Module name}} + +::: {.f3} +
Want to learn more? Find your next learning resource on [{{< fa graduation-cap >}} {{< var validmind.training >}}](/training/training.qmd){target="_blank"}. + +::: -[Change this link and text](/guide/guides.qmd){.button target="_blank"} ::: + +::: {.column width="70%" .bl .pl4 .f3} +In this {{#th}} module, you learned how to: + +- [x] task +- [x] task +- [x] task +- [x] task + +::: +:::: + +::: {.f2}
+There is more that {{< var vm.product >}} can do to help you {{perform task}}, including {{...}}: +::: + +::: {.tc} +[More docs](/index.qmd){.button target="_blank"} -Or, find your next learning resource on [{{< var validmind.training >}}](/training/training.qmd). \ No newline at end of file +::: \ No newline at end of file diff --git a/site/training/training.qmd b/site/training/training.qmd index 4ed4c33538..9eb6d505cd 100644 --- a/site/training/training.qmd +++ b/site/training/training.qmd @@ -45,28 +45,28 @@ listing: subtitle: "Open notebook in JupyterHub {{< fa chevron-right >}}" description: "Gets you started with the basic process of documenting models with {{< var vm.product >}}, from the {{< var vm.developer >}} to the {{< var vm.platform >}}." categories: ["[Demo] Customer Churn Model"] - reading-time: "10" + reading-time: "20" author: "{{< var vm.product >}}" - - path: https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/intro_for_model_developers.ipynb - title: "{{< var vm.product >}} introduction for model developers" - subtitle: "Open notebook in JupyterHub {{< fa chevron-right >}}" + - path: https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/tutorials/model_development/101-set_up_validmind.ipynb + title: "{{< var vm.product >}} for model development (4-part series)" + subtitle: "Open series in JupyterHub {{< fa chevron-right >}}" description: "Learn how the end-to-end documentation process works based on common scenarios you encounter in model development settings." 
categories: ["[Demo] Customer Churn Model"] - reading-time: "27" + reading-time: "60" author: "{{< var vm.product >}}" - path: https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/code_samples/credit_risk/application_scorecard_demo.ipynb title: "Document an application scorecard model" subtitle: "Open notebook in JupyterHub {{< fa chevron-right >}}" description: "Guides you through building and documenting an application scorecard model using the Lending Club sample dataset from Kaggle." categories: ["[Demo] Credit Risk Model"] - reading-time: "16" + reading-time: "30" author: "{{< var vm.product >}}" - path: https://jupyterhub.validmind.ai/hub/user-redirect/lab/tree/code_samples/nlp_and_llm/foundation_models_integration_demo.ipynb title: "Prompt validation for large language models (LLMs)" subtitle: "Open notebook in JupyterHub {{< fa chevron-right >}}" description: "Run and document prompt validation tests for a large language model (LLM) specialized in sentiment analysis for financial news." categories: ["[Demo] Foundation Model - Text Sentiment Analysis"] - reading-time: "8" + reading-time: "15" author: "{{< var vm.product >}}" --- diff --git a/site/training/validator-fundamentals/validator-fundamentals-register.qmd b/site/training/validator-fundamentals/validator-fundamentals-register.qmd index 6ce257052a..69e9119fbe 100644 --- a/site/training/validator-fundamentals/validator-fundamentals-register.qmd +++ b/site/training/validator-fundamentals/validator-fundamentals-register.qmd @@ -6,7 +6,12 @@ aliases: - training-for-model-validators.html --- -As a validator who is new to {{< var vm.product >}}, learn how to review model documentation, prepare your validation report, track issues, and submit your report for approval. +Learn how to use {{< var vm.product >}} as a **validator** to generate validation reports, automate testing, and collaborate with your model development team. 
+ +::: {.column-margin} +{{< include /training/_compatibility.qmd >}} + +::: ::: {.attn}