diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 106af91..56b4c76 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -11,7 +11,7 @@ "enableNonRootDocker": "true" } }, - "postCreateCommand": "pip3 install --user -r requirements.txt", + "postCreateCommand": "pip3 install --user -r requirements.txt && curl -fsSL https://aka.ms/install-azd.sh | bash", "customizations": { "vscode": { "extensions": [ diff --git a/challenge-0/README.md b/challenge-0/README.md index ebf94f6..30e9bbf 100644 --- a/challenge-0/README.md +++ b/challenge-0/README.md @@ -2,16 +2,14 @@ **Expected Duration:** 30 minutes -Welcome to your very first challenge! Your goal in this challenge is to create the services and enviornment necessary to conduct this hackathon. You will deploy the required resources in Azure, create your development enviornment and all the assets necessary for the subsequent challenges. By completing this challenge, you will set up the foundation for the rest of the hackathon. +Welcome to your very first challenge! Your goal in this challenge is to create the services and environment necessary to conduct this hackathon. You will deploy the required resources in Azure, create your development environment and all the assets necessary for the subsequent challenges. By completing this challenge, you will set up the foundation for the rest of the hackathon. If something is not working correctly, please do let your coach know! - ## 1.1 Fork the Repository Before you start, please fork this repository to your GitHub account by clicking the `Fork` button in the upper right corner of the repository's main screen (or follow the [documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo#forking-a-repository)). This will allow you to make changes to the repository and save your progress. 
- ## 1.2 Development Environment GitHub Codespaces is a cloud-based development environment that allows you to code from anywhere. It provides a fully configured environment that can be launched directly from any GitHub repository, saving you from lengthy setup times. You can access Codespaces from your browser, Visual Studio Code, or the GitHub CLI, making it easy to work from virtually any device. @@ -24,40 +22,43 @@ Please select your forked repository from the dropdown and, if necessary, adjust **NOTE:** If GitHub Codespaces is not enabled in your organization, you can enable it by following the instructions [here](https://docs.github.com/en/codespaces/managing-codespaces-for-your-organization/enabling-or-disabling-github-codespaces-for-your-organization), or, if you cannot change your GitHub organization's settings, create a free personal GitHub account [here](https://github.com/signup?ref_cta=Sign+up&ref_loc=header+logged+out&ref_page=%2F&source=header-home). The Github Free Plan includes 120 core hours per month, equivalent to 60 hours on a 2-core machine, along with 15 GB of storage. - ## 1.3 Resource Deployment Guide -The first step on this hackathon will be to create the resources we will use throughout the day. You can deploy using either the one-click button or manual method below. -Before anything else, let's log in into the CLI with our account. Please paste the code underneath and follow the necessary instructions. +We're now using the [Azure Developer CLI (azd)](https://learn.microsoft.com/azure/developer/azure-developer-cli/) to deploy the environment defined in `challenge-0/azure.yaml`. The Bicep template that provisions every resource lives in `challenge-0/infra/main.bicep`. -```bash -az login --use-device-code -``` +### Prerequisites -## 1.3.1 Resources Deployment +- Install the Azure CLI and Azure Developer CLI (`azd`). +- Ensure you have the required Azure permissions to create resources and role assignments in your subscription. 
-Now, time to deploy our resources to Azure! +### Deploy with azd -[![Deploy to Azure](https://aka.ms/deploytoazurebutton)](https://portal.azure.com/#create/Microsoft.Template/uri/https%3A%2F%2Fraw.githubusercontent.com%2Fmartaldsantos%2Fagentic-ai-hack%2Fmain%2Fchallenge-0%2Fiac%2Fazuredeploy.json) +From the repository root, run the following commands: -**Deployment Parameters:** -- servicePrincipalObjectId: Leave this field empty. -- resource group: Introduce the unique name for your resource group (example: rg-user01-yourinitials). +```powershell +azd auth login +azd env new <env-name> --location swedencentral +azd up +``` -**NOTE:** Some parts of your deployment may fail if the resource provider `Microsoft.AlertsManagement` is not registered in your. Follow the [documentation](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider-1) to register it and the re-run the deployment. +- Replace `<env-name>` with a friendly name (for example, `hackathon`). +- The `azd env new` command seeds environment configuration and prompts for the subscription and resource group name. The default location (`swedencentral`) aligns with the template. +- Create a new resource group when prompted (for example, `rg-aihackaton-<abc>` where `abc` are the first three letters of your last name). -Resource deployment can take up to 10 minutes, afterwards you'll be able to find most of the resources on your resource group. +The deployment usually completes within 10 minutes and creates all dependent role assignments automatically. If the deployment fails due to a missing resource provider (for example, `Microsoft.AlertsManagement`), register it and re-run `azd up`. 
## 1.4 Verify the creation of your resources -Go back to your `Azure Portal` and find your `Resource Group`that should by now contain 9 resources and look like this: +Go back to your `Azure Portal` and find your `Resource Group` that should by now contain the storage account, Azure AI Foundry hub and project, Azure AI Search, Cosmos DB (serverless), Container Registry, Log Analytics workspace with Application Insights, and a Key Vault. ![alt text](image.png) ## 1.5 Let's retrieve the necessary keys + After deploying the resources, you will need to configure the environment variables in the `.env` file. Double check you have logged in into your Azure account on the CLI. If that's settled, let's move into retrieving our keys. The `.env` file is a configuration file that contains the environment variables for the application. The `.env` file is automatically created by running the following command within the terminal in your Codespace. **Then run the get-keys script with your resource group name:** + ```bash cd challenge-0 && ./get-keys.sh --resource-group YOUR_RESOURCE_GROUP_NAME ``` @@ -75,6 +76,7 @@ The repo has an `.env.sample` file that shows the relevant environment variables If the file is not created, simply copy over `.env.sample` to `.env` - then populate those values manually from the respective Azure resource pages using the Azure Portal. ## Conclusion + By reaching this section you should have every resource and installed the requirements necessary to conduct the hackathon. In the next challenges, you will use these services to start strongly your Azure AI Agents journey. Now the real fun begins! 
diff --git a/challenge-0/azure.yaml b/challenge-0/azure.yaml new file mode 100644 index 0000000..4e53ed8 --- /dev/null +++ b/challenge-0/azure.yaml @@ -0,0 +1,10 @@ +name: agentic-ai-hack-challenge-0 +metadata: + template: challenge-0 +infra: + provider: bicep + path: ./infra + template: main.bicep + parameters: + location: ${AZURE_LOCATION} + servicePrincipalObjectId: ${SERVICE_PRINCIPAL_OBJECT_ID:-''} diff --git a/challenge-0/get-keys.sh b/challenge-0/get-keys.sh index cbee5d4..8ecf90b 100755 --- a/challenge-0/get-keys.sh +++ b/challenge-0/get-keys.sh @@ -1,8 +1,8 @@ #!/bin/bash # -# This script will retrieve necessary keys and properties from Azure Resources -# deployed using "Deploy to Azure" button and will store them in a file named -# ".env" in the parent directory. +# This script retrieves the keys and connection information emitted by the +# Challenge 0 infrastructure deployment (now managed via Azure Developer CLI) +# and stores them in a file named ".env" in the repository root. # Login to Azure if [ -z "$(az account show)" ]; then @@ -30,12 +30,17 @@ fi # Get resource group deployments, find deployments starting with 'Microsoft.Template' and sort them by timestamp echo "Getting the deployments in '$resourceGroupName'..." -deploymentName=$(az deployment group list --resource-group $resourceGroupName --query "[?contains(name, 'Microsoft.Template') || contains(name, 'azuredeploy')].{name:name}[0].name" --output tsv) +deploymentName=$(az deployment group list --resource-group $resourceGroupName --query "sort_by([], &properties.timestamp)[-1].name" --output tsv) if [ $? -ne 0 ]; then echo "Error occurred while fetching deployments. Exiting..." exit 1 fi +if [ -z "$deploymentName" ]; then + echo "No deployments found in resource group '$resourceGroupName'. Ensure 'azd up' completed successfully." 
+ exit 1 +fi + # Get output parameters from last deployment using Azure CLI queries instead of jq echo "Getting the output parameters from the last deployment '$deploymentName' in '$resourceGroupName'..." @@ -63,7 +68,7 @@ aiFoundryProjectEndpoint=$(az deployment group show --resource-group $resourceGr # If deployment outputs are empty, try to discover resources by type -if [ -z "$storageAccountName" ] || [ -z "$logAnalyticsWorkspaceName" ] || [ -z "$apiManagementName" ] || [ -z "$keyVaultName" ] || [ -z "$containerRegistryName" ]; then +if [ -z "$storageAccountName" ] || [ -z "$logAnalyticsWorkspaceName" ] || [ -z "$keyVaultName" ] || [ -z "$containerRegistryName" ]; then echo "Some deployment outputs not found, discovering missing resources by type..." if [ -z "$storageAccountName" ]; then @@ -78,10 +83,6 @@ if [ -z "$storageAccountName" ] || [ -z "$logAnalyticsWorkspaceName" ] || [ -z " searchServiceName=$(az search service list --resource-group $resourceGroupName --query "[0].name" -o tsv 2>/dev/null || echo "") fi - if [ -z "$apiManagementName" ]; then - apiManagementName=$(az apim list --resource-group $resourceGroupName --query "[0].name" -o tsv 2>/dev/null || echo "") - fi - if [ -z "$aiFoundryHubName" ]; then aiFoundryHubName=$(az cognitiveservices account list --resource-group $resourceGroupName --query "[?kind=='AIServices'].name | [0]" -o tsv 2>/dev/null || echo "") fi @@ -243,8 +244,14 @@ echo "AI_FOUNDRY_HUB_NAME=\"$aiFoundryHubName\"" >> ../.env echo "AI_FOUNDRY_PROJECT_NAME=\"$aiFoundryProjectName\"" >> ../.env echo "AI_FOUNDRY_ENDPOINT=\"$aiFoundryEndpoint\"" >> ../.env echo "AI_FOUNDRY_KEY=\"$aiFoundryKey\"" >> ../.env -acr_username=$(az acr credential show --name $containerRegistryName --query username -o tsv) -acr_password=$(az acr credential show --name $containerRegistryName --query passwords[0].value -o tsv) + +if [ -n "$containerRegistryName" ]; then + acr_username=$(az acr credential show --name $containerRegistryName --query username -o 
tsv 2>/dev/null || echo "") + acr_password=$(az acr credential show --name $containerRegistryName --query passwords[0].value -o tsv 2>/dev/null || echo "") +else + acr_username="" + acr_password="" +fi # Construct AI Foundry Hub Endpoint if missing if [ -z "$aiFoundryHubEndpoint" ] && [ -n "$aiFoundryHubName" ]; then echo "Constructing AI Foundry Hub Endpoint..." @@ -256,17 +263,6 @@ if [ -z "$aiFoundryHubEndpoint" ] && [ -n "$aiFoundryHubName" ]; then fi echo "AI_FOUNDRY_HUB_ENDPOINT=\"$aiFoundryHubEndpoint\"" >> ../.env -# Construct AI Foundry Project Endpoint if not found in deployment outputs -if [ -z "$aiFoundryProjectEndpoint" ] && [ -n "$aiFoundryHubName" ] && [ -n "$aiFoundryProjectName" ]; then - echo "Constructing AI Foundry Project Endpoint..." - aiFoundryProjectEndpoint="https://${aiFoundryHubName}.services.ai.azure.com/api/projects/${aiFoundryProjectName}" - echo "Constructed project endpoint: $aiFoundryProjectEndpoint" -elif [ -n "$aiFoundryProjectEndpoint" ] && [[ "$aiFoundryProjectEndpoint" == *"ai.azure.com/build/overview"* ]]; then - # If we got a web UI URL from deployment outputs, convert it to API endpoint - echo "Converting web UI URL to API endpoint..." 
- aiFoundryProjectEndpoint="https://${aiFoundryHubName}.services.ai.azure.com/api/projects/${aiFoundryProjectName}" - echo "Converted project endpoint: $aiFoundryProjectEndpoint" -fi echo "AI_FOUNDRY_PROJECT_ENDPOINT=\"$aiFoundryProjectEndpoint\"" >> ../.env echo "AZURE_AI_CONNECTION_ID=\"$azureAIConnectionId\"" >> ../.env # Azure Cosmos DB @@ -289,7 +285,6 @@ echo "=== Configuration Summary ===" echo "Storage Account: $storageAccountName" echo "Log Analytics Workspace: $logAnalyticsWorkspaceName" echo "Search Service: $searchServiceName" -echo "API Management: $apiManagementName" echo "AI Foundry Hub: $aiFoundryHubName" echo "AI Foundry Project: $aiFoundryProjectName" echo "Key Vault: $keyVaultName" diff --git a/challenge-0/infra/main.bicep b/challenge-0/infra/main.bicep new file mode 100644 index 0000000..5f12d0a --- /dev/null +++ b/challenge-0/infra/main.bicep @@ -0,0 +1,302 @@ +@allowed([ + 'swedencentral' +]) +@description('Azure location where resources should be deployed (e.g., swedencentral)') +param location string = 'swedencentral' + +@description('Optional: Object ID (Principal ID) of the service principal to grant permissions to AI Foundry resources') +param servicePrincipalObjectId string = '' + +var prefix = 'msagthack' +var suffix = uniqueString(resourceGroup().id) + +var storageAccountName = replace('${prefix}-sa-${suffix}', '-', '') +var logAnalyticsWorkspaceName = '${prefix}-loganalytics-${suffix}' +var searchServiceName = '${prefix}-search-${suffix}' +var containerRegistryName = replace('${prefix}cr${suffix}', '-', '') +var keyVaultName = '${prefix}kv${suffix}' +var aiFoundryName = '${prefix}-aifoundry-${suffix}' +var aiProjectName = '${prefix}-aiproject-${suffix}' +var applicationInsightsName = '${prefix}-appinsights-${suffix}' +var cosmosDbAccountName = '${prefix}-cosmos-${suffix}' + +var cognitiveServicesUserRoleId = subscriptionResourceId( + 'Microsoft.Authorization/roleDefinitions', + 'a97b65f3-24c7-4388-baec-2e87135dc908' +) +var 
searchServiceContributorRoleId = subscriptionResourceId( + 'Microsoft.Authorization/roleDefinitions', + '7ca78c08-252a-4471-8644-bb5ff32d4ba0' +) +var aiDeveloperRoleId = subscriptionResourceId( + 'Microsoft.Authorization/roleDefinitions', + '64702f94-c441-49e6-a78b-ef80e0188fee' +) +var contributorRoleId = subscriptionResourceId( + 'Microsoft.Authorization/roleDefinitions', + 'b24988ac-6180-42a0-ab88-20f7382dd24c' +) + +resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' = { + name: storageAccountName + location: location + sku: { + name: 'Standard_LRS' + } + kind: 'StorageV2' + properties: { + allowBlobPublicAccess: false + networkAcls: { + defaultAction: 'Allow' + } + } +} + +resource logAnalyticsWorkspace 'Microsoft.OperationalInsights/workspaces@2021-06-01' = { + name: logAnalyticsWorkspaceName + location: location + properties: { + retentionInDays: 30 + features: { + searchVersion: 1 + } + sku: { + name: 'PerGB2018' + } + } +} + +resource cosmosDbAccount 'Microsoft.DocumentDB/databaseAccounts@2023-04-15' = { + name: cosmosDbAccountName + location: location + kind: 'GlobalDocumentDB' + identity: { + type: 'SystemAssigned' + } + properties: { + consistencyPolicy: { + defaultConsistencyLevel: 'Session' + } + locations: [ + { + locationName: location + failoverPriority: 0 + isZoneRedundant: false + } + ] + databaseAccountOfferType: 'Standard' + enableAutomaticFailover: false + enableMultipleWriteLocations: false + capabilities: [ + { + name: 'EnableServerless' + } + ] + } +} + +resource searchService 'Microsoft.Search/searchServices@2023-11-01' = { + name: searchServiceName + location: location + sku: { + name: 'basic' + } + properties: { + hostingMode: 'default' + replicaCount: 1 + partitionCount: 1 + } +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-07-01' = { + name: containerRegistryName + location: location + sku: { + name: 'Basic' + } + properties: { + adminUserEnabled: true + } +} + +resource 
applicationInsights 'Microsoft.Insights/components@2020-02-02' = { + name: applicationInsightsName + location: location + kind: 'web' + properties: { + Application_Type: 'web' + WorkspaceResourceId: logAnalyticsWorkspace.id + } +} + +resource keyVault 'Microsoft.KeyVault/vaults@2023-07-01' = { + name: keyVaultName + location: location + properties: { + sku: { + family: 'A' + name: 'standard' + } + tenantId: subscription().tenantId + accessPolicies: [] + enabledForDeployment: false + enabledForDiskEncryption: false + enabledForTemplateDeployment: false + enableSoftDelete: true + softDeleteRetentionInDays: 90 + enableRbacAuthorization: true + } +} + +resource aiFoundry 'Microsoft.CognitiveServices/accounts@2025-04-01-preview' = { + name: aiFoundryName + location: location + identity: { + type: 'SystemAssigned' + } + sku: { + name: 'S0' + } + kind: 'AIServices' + properties: { + allowProjectManagement: true + customSubDomainName: aiFoundryName + disableLocalAuth: false + } +} + +resource aiProject 'Microsoft.CognitiveServices/accounts/projects@2025-04-01-preview' = { + name: aiProjectName + parent: aiFoundry + location: location + identity: { + type: 'SystemAssigned' + } + properties: {} +} + +resource gpt4MiniDeployment 'Microsoft.CognitiveServices/accounts/deployments@2024-10-01' = { + name: 'gpt-4.1-mini' + parent: aiFoundry + sku: { + capacity: 200 + name: 'GlobalStandard' + } + properties: { + model: { + name: 'gpt-4.1-mini' + format: 'OpenAI' + } + } +} + +resource embeddingDeployment 'Microsoft.CognitiveServices/accounts/deployments@2024-10-01' = { + name: 'text-embedding-ada-002' + parent: aiFoundry + sku: { + capacity: 200 + name: 'GlobalStandard' + } + properties: { + model: { + name: 'text-embedding-ada-002' + format: 'OpenAI' + } + } + dependsOn: [ + gpt4MiniDeployment + ] +} + +resource projectAIFoundryRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(aiFoundry.id, aiProject.id, cognitiveServicesUserRoleId) + scope: 
aiFoundry + properties: { + roleDefinitionId: cognitiveServicesUserRoleId + principalId: aiProject.identity.principalId + principalType: 'ServicePrincipal' + } +} + +resource aiFoundrySearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(searchService.id, aiFoundry.id, searchServiceContributorRoleId) + scope: searchService + properties: { + roleDefinitionId: searchServiceContributorRoleId + principalId: aiFoundry.identity.principalId + principalType: 'ServicePrincipal' + } +} + +resource projectSearchRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + name: guid(searchService.id, aiProject.id, searchServiceContributorRoleId) + scope: searchService + properties: { + roleDefinitionId: searchServiceContributorRoleId + principalId: aiProject.identity.principalId + principalType: 'ServicePrincipal' + } +} + +resource servicePrincipalCognitiveServicesUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(servicePrincipalObjectId)) { + name: guid(aiFoundry.id, servicePrincipalObjectId, cognitiveServicesUserRoleId) + scope: aiFoundry + properties: { + roleDefinitionId: cognitiveServicesUserRoleId + principalId: servicePrincipalObjectId + principalType: 'ServicePrincipal' + } +} + +resource servicePrincipalAIDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(servicePrincipalObjectId)) { + name: guid(aiFoundry.id, servicePrincipalObjectId, aiDeveloperRoleId) + scope: aiFoundry + properties: { + roleDefinitionId: aiDeveloperRoleId + principalId: servicePrincipalObjectId + principalType: 'ServicePrincipal' + } +} + +resource servicePrincipalContributorRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(servicePrincipalObjectId)) { + name: guid(aiFoundry.id, servicePrincipalObjectId, contributorRoleId) + scope: aiFoundry + properties: { + roleDefinitionId: contributorRoleId + principalId: servicePrincipalObjectId + 
principalType: 'ServicePrincipal' + } +} + +resource searchConnection 'Microsoft.CognitiveServices/accounts/connections@2025-04-01-preview' = { + name: '${aiFoundry.name}-aisearch' + parent: aiFoundry + properties: { + category: 'CognitiveSearch' + target: 'https://${searchServiceName}.search.windows.net' + authType: 'ApiKey' + isSharedToAll: true + credentials: { + key: searchService.listAdminKeys().primaryKey + } + metadata: { + ApiType: 'Azure' + ResourceId: searchService.id + location: location + } + } +} + +output storageAccountName string = storageAccountName +output logAnalyticsWorkspaceName string = logAnalyticsWorkspaceName +output searchServiceName string = searchServiceName +output aiFoundryHubName string = aiFoundryName +output aiFoundryProjectName string = aiProjectName +output keyVaultName string = keyVaultName +output containerRegistryName string = containerRegistryName +output applicationInsightsName string = applicationInsightsName +output cosmosDbAccountName string = cosmosDbAccountName +output searchServiceEndpoint string = 'https://${searchServiceName}.search.windows.net/' +output aiFoundryHubEndpoint string = 'https://ml.azure.com/home?wsid=${aiFoundry.id}' +output aiFoundryProjectEndpoint string = 'https://ai.azure.com/build/overview?wsid=${aiProject.id}' +output cosmosDbEndpoint string = cosmosDbAccount.properties.documentEndpoint diff --git a/challenge-5/README.md b/challenge-5/README.md index f6e8a3a..9581790 100644 --- a/challenge-5/README.md +++ b/challenge-5/README.md @@ -35,22 +35,36 @@ We will have 3 agents that are each responsible for gathering and processing spe | Agent | Function | Data Source/Technology | Implementation | |-------|----------|----------------------|----------------| -| **Claim Reviewer Agent** | Analyzes insurance claims and damage assessments | Cosmos DB data | Azure AI Agent Service + SK Plugins | -| **Policy Checker Agent** | Validates coverage against insurance policies | Azure AI Search connection | Azure AI 
Agent Service | -| **Risk Analyzer Agent** | Evaluates risk factors and provides recommendations | Cosmos DB data | Azure AI Agent Service + SK Plugins | -| **Master Orchestrator Agent** | Coordinates the three agents and synthesizes their outputs | Combined Plugins + Tools | Semantic Kernel Orchestration | +| **Claim Reviewer Agent** | Analyzes insurance claims and damage assessments | Cosmos DB data | Azure OpenAI + Custom Plugins | +| **Policy Checker Agent** | Validates coverage against insurance policies | Azure AI Search connection | Azure OpenAI | +| **Risk Analyzer Agent** | Evaluates risk factors and provides recommendations | Cosmos DB data | Azure OpenAI + Custom Plugins | +| **Master Orchestrator** | Coordinates the three agents and synthesizes their outputs | Combined Tools | Microsoft Agent Framework Concurrent Orchestration | -### Understanding Implementation Approaches: Azure AI Agent Service vs Semantic Kernel Integration +### Understanding Implementation Approaches: Microsoft Agent Framework -When building intelligent agents, you have two primary implementation approaches available in the Azure ecosystem. **Azure AI Agent Service with direct tool connections** provides a streamlined, low-code approach where agents are configured through the Azure AI Foundry portal with direct connections to Azure services like Azure AI Search, enabling rapid prototyping and deployment with built-in enterprise features like security, monitoring, and compliance. This approach is ideal for straightforward scenarios where agents need to access specific Azure services without complex custom logic. In contrast, **Azure AI Agents with Semantic Kernel integration** offers a more flexible, code-first approach that combines the enterprise-grade capabilities of Azure AI Agent Service with Semantic Kernel's powerful plugin framework. 
This hybrid approach allows developers to create custom plugins with complex business logic, advanced data processing capabilities, and sophisticated integrations (like our Cosmos DB plugin for retrieving structured claim data), while still benefiting from Azure's managed infrastructure and security features. The Semantic Kernel approach is particularly valuable when you need custom data transformations, complex orchestration patterns, or when integrating with *non-Azure* services. +**Microsoft Agent Framework** provides a modern, code-first approach to building and orchestrating AI agents. The framework offers powerful concurrent orchestration capabilities through the `ConcurrentBuilder` class, which enables multiple agents to work in parallel on the same task. This approach is ideal for scenarios where you need: + +- **True Parallelism**: Multiple agents analyzing the same input simultaneously from different perspectives +- **Ensemble Reasoning**: Combining insights from multiple specialized agents for comprehensive analysis +- **Flexible Integration**: Easy integration with Azure OpenAI, custom tools, and data sources +- **Event-based Results**: Streaming results as they become available from each agent + +The Agent Framework approach is particularly valuable when you need custom orchestration patterns, sophisticated error handling, and when integrating with various data sources (like our Cosmos DB plugin for retrieving structured claim data). ## Exercise Guide - Time to Orchestrate! -## Part 1- Create your Semantic Kernel Orchestrator -Time to build your orchestrator! Please jump over to `orchestration.ipynb` file for a demonstration on how we will integrated our troop of agents to help us solve our pickle! -This notebook is composed of only two cells of code. 
The first one will have in it 4 core components: 3 are dedicated to the creation of the 3 agents we have defined and the last piece is a `task` will be the orchestrator, that defines specific instructions to orchestrate the 3 agent. +## Part 1- Create your Microsoft Agent Framework Orchestrator +Time to build your orchestrator! Please jump over to `orchestration.ipynb` file for a demonstration on how we will integrate our troop of agents to help us solve our challenge! + +This notebook demonstrates concurrent orchestration using Microsoft Agent Framework. The implementation includes: + +1. **Agent Creation**: Three specialized agents (Claim Reviewer, Risk Analyzer, Policy Checker) are created using the Azure OpenAI chat client +2. **Concurrent Workflow**: The `ConcurrentBuilder` class creates a workflow that runs all three agents in parallel +3. **Task Distribution**: Each agent receives the same task but applies their specialized perspective +4. **Result Aggregation**: Results are collected via an event stream as agents complete their analysis +5. **Final Decision**: An approver agent synthesizes all analyses to provide the final claim decision -In Semantic Kernel's Orchestration, [`tasks`](https://learn.microsoft.com/en-us/semantic-kernel/frameworks/agent/agent-orchestration/group-chat?pivots=programming-language-python#invoke-the-orchestration-1) revolve around integrating AI capabilities with traditional programming through a **modular** architecture. Core tasks include creating and managing skills (collections of related AI functions), designing and using prompts for both natural language and code generation, orchestrating planners to break down goals into executable steps, and using connectors to interface with external services like APIs or databases. Developers also manage memory for context retention, handle input/output pipelines, and coordinate execution flows that combine multiple skills or plugins. 
These components enable building intelligent, context-aware agents that can reason, plan, and act autonomously. +In Microsoft Agent Framework, concurrent orchestration enables true parallel execution where multiple agents work simultaneously on the same problem, each applying their domain expertise. This is achieved through the `ConcurrentBuilder` which fans out the task to all participating agents and aggregates their responses. ## Part 2 - Now onto automation! @@ -75,17 +89,28 @@ Response body (application/json): ### Part 2.1 Quick start - 1. **Configure environment variables**: Before running the application, you need to add the following environment variables manually to your `.env` file or set them in your shell environment: + 1. **Install required packages**: Before running the application, install the Microsoft Agent Framework and required dependencies: + + ```bash + pip install azure-identity agent-framework --pre + ``` + + Note: The `--pre` flag is required as Microsoft Agent Framework is currently in preview. + + 2. **Configure environment variables**: Add the following environment variables to your `.env` file or set them in your shell environment: ```bash - CLAIM_REV_AGENT_ID="" - RISK_ANALYZER_AGENT_ID="" - POLICY_CHECKER_AGENT_ID="" + AZURE_OPENAI_ENDPOINT="" + AZURE_OPENAI_KEY="" + AZURE_OPENAI_DEPLOYMENT_NAME="" + AZURE_OPENAI_API_VERSION="2024-10-01-preview" + COSMOS_ENDPOINT="" + COSMOS_KEY="" ``` - 2. Copy the .env file in root to the challenge-5 directory + 3. Copy the .env file in root to the challenge-5 directory - 3. Move to challenge-5 directory, create and activate a Python 3.11 virtual environment: + 4. Move to challenge-5 directory, create and activate a Python 3.11 virtual environment: ```bash cd challenge-5 @@ -93,19 +118,19 @@ Response body (application/json): source .venv/bin/activate ``` - 4. Install dependencies: + 5. Install dependencies: ```bash pip install -r requirements.txt ``` - 5. Run the app: + 6. 
Run the app: ```bash uvicorn main:app --reload --port 8000 ``` - 6. Open a new terminal and test your new app with curl: + 7. Open a new terminal and test your new app with curl: ```bash CLAIM_ID="CL001" @@ -253,11 +278,12 @@ Create environment and container app using the pushed image and set the same env ## ๐ŸŽฏ Conclusion -Congratulations! You've successfully built a multi-agent orchestration system that coordinates three specialized insurance agents through a Master Orchestrator. Your system now handles complete insurance claim processing workflows using GroupChat orchestration patterns with Semantic Kernel. +Congratulations! You've successfully built a multi-agent orchestration system using Microsoft Agent Framework that coordinates three specialized insurance agents through concurrent orchestration. Your system now handles complete insurance claim processing workflows with true parallel execution. **Key Achievements:** -- Implemented a GroupChat orchestration for agent processing -- Created a Master Orchestrator that synthesizes outputs from multiple agents -- Built hybrid solutions combining Azure AI Agent Service with custom Semantic Kernel plugins +- Implemented concurrent orchestration using Microsoft Agent Framework's ConcurrentBuilder +- Created a Master Orchestrator that synthesizes outputs from multiple agents running in parallel +- Built hybrid solutions combining Azure OpenAI with custom tool plugins - Developed a production-ready framework for intelligent insurance claim processing - Prepared the system for enterprise deployment to an Azure Container App with scalability and monitoring capabilities +- Leveraged modern agent orchestration patterns for efficient multi-perspective analysis diff --git a/challenge-5/agents/cosmos_tools.py b/challenge-5/agents/cosmos_tools.py new file mode 100644 index 0000000..0d3a70c --- /dev/null +++ b/challenge-5/agents/cosmos_tools.py @@ -0,0 +1,55 @@ +import os +import json +from typing import Annotated +from 
azure.cosmos import CosmosClient +from functools import lru_cache + +# Cache the Cosmos client to avoid recreating it on each call +@lru_cache(maxsize=1) +def _get_cosmos_client(): + """Get or create a cached Cosmos DB client.""" + endpoint = os.environ.get("COSMOS_ENDPOINT") + key = os.environ.get("COSMOS_KEY") + + if not endpoint or not key: + raise ValueError("COSMOS_ENDPOINT and COSMOS_KEY environment variables must be set") + + return CosmosClient(endpoint, key) + +# Define standalone functions that can be used as tools in Agent Framework + +def get_document_by_claim_id(claim_id: Annotated[str, "The claim_id to retrieve"]) -> Annotated[str, "JSON document from Cosmos DB"]: + """Retrieve a document by its claim_id using a cross-partition query.""" + database_name = "insurance_claims" + container_name = "crash_reports" + + try: + client = _get_cosmos_client() + database = client.get_database_client(database_name) + container = database.get_container_client(container_name) + + # Use SQL query to find document by claim_id across all partitions + query = "SELECT * FROM c WHERE c.claim_id = @claim_id" + parameters = [{"name": "@claim_id", "value": claim_id}] + + items = list(container.query_items( + query=query, + parameters=parameters, + enable_cross_partition_query=True, + max_item_count=1 + )) + + if not items: + return f"โŒ No document found with claim_id '{claim_id}' in container '{container_name}'." 
+ + # Return the first matching document + document = items[0] + return json.dumps(document, indent=2, ensure_ascii=False) + + except ValueError as ve: + return f"โŒ Configuration error: {str(ve)}" + except Exception as e: + return f"โŒ Error retrieving document by claim_id '{claim_id}': {str(e)}" + +# Export the function +__all__ = ['get_document_by_claim_id'] diff --git a/challenge-5/main.py b/challenge-5/main.py index 67d6193..1fea1b8 100644 --- a/challenge-5/main.py +++ b/challenge-5/main.py @@ -1,25 +1,15 @@ from fastapi import FastAPI from pydantic import BaseModel -from typing import Dict, Any -from datetime import timedelta -from azure.identity.aio import DefaultAzureCredential - -from semantic_kernel.agents.runtime import InProcessRuntime -from semantic_kernel.contents import ChatMessageContent -from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions -from azure.ai.agents.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole -from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread, Agent, ChatCompletionAgent, GroupChatOrchestration, RoundRobinGroupChatManager -from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from azure.identity import AzureCliCredential # async credential -from typing import Annotated +from typing import Dict, Any, List +from azure.identity import DefaultAzureCredential, AzureCliCredential +from agent_framework import ChatMessage, ConcurrentBuilder +from agent_framework.azure import AzureOpenAIChatClient import asyncio import os -import time -import asyncio import json import re -from agents.tools import CosmosDBPlugin +from agents.cosmos_tools import get_document_by_claim_id from dotenv import load_dotenv load_dotenv(override=True) @@ -32,92 +22,119 @@ class ClaimRequest(BaseModel): app = FastAPI(title="Claim API", version="0.1.0") -async def get_specialized_agents() -> list[Agent]: - """Get our specialized insurance processing 
agents using Semantic Kernel.""" - - print("๐Ÿ”ง Get specialized insurance agents...") - - # Create Cosmos DB plugin instances for different agents - cosmos_plugin_claims = CosmosDBPlugin() - cosmos_plugin_risk = CosmosDBPlugin() +async def get_specialized_agents() -> Dict[str, Any]: + """Get our specialized insurance processing agents using Microsoft Agent Framework.""" + print("๐Ÿ”ง Creating specialized insurance agents...") # Get environment variables - endpoint = os.environ.get("AI_FOUNDRY_PROJECT_ENDPOINT") - + # Try to use DefaultAzureCredential first, fall back to AzureCliCredential + try: + credential = DefaultAzureCredential() + except Exception as e: + print(f"โš ๏ธ DefaultAzureCredential failed: {str(e)}, falling back to AzureCliCredential") + credential = AzureCliCredential() - agents = {} + # Create Azure OpenAI chat client + # Agent Framework uses environment variables or explicit configuration + chat_client = AzureOpenAIChatClient(credential=credential) - async with DefaultAzureCredential() as creds: - client = AzureAIAgent.create_client(credential=creds, endpoint=endpoint) - print("โœ… Connected to AI Foundry endpoint.") - - # Getting Claim Reviewer Agent with Cosmos DB access - print("๐Ÿ” Getting Claim Reviewer Agent...") - claim_reviewer_definition = await client.agents.get_agent(agent_id=os.environ.get("CLAIM_REV_AGENT_ID")) - - claim_reviewer_agent = AzureAIAgent( - client=client, - definition=claim_reviewer_definition, - description="Agent that reviews insurance claims and retrieves claim details.", - plugins=[cosmos_plugin_claims] - ) - - # Getting Risk Analyzer Agent with Cosmos DB access - print("โš ๏ธ Getting Risk Analyzer Agent...") - risk_analyzer_definition = await client.agents.get_agent(agent_id=os.environ.get("RISK_ANALYZER_AGENT_ID")) - - risk_analyzer_agent = AzureAIAgent( - client=client, - definition=risk_analyzer_definition, - description="Agent that analyzes the risk associated with the claim.", - plugins=[cosmos_plugin_risk] 
- ) - - print("โœ… Getting Policy Checker Agent...") - - policy_checker_definition = await client.agents.get_agent(agent_id=os.environ.get("POLICY_CHECKER_AGENT_ID")) - - policy_checker_agent = AzureAIAgent( - client=client, - definition=policy_checker_definition, - description="Agent that checks if the policy covers the claim.", - ) - - approver_agent = ChatCompletionAgent( - name="ApproverAgent", - description="Final decision maker on insurance claims based on analysis from other agents.", - instructions=( - """You must analyze and process insurance claims based on the information provided by specialized agents. - You will provide a final decision on whether to approve or deny the claim, along with a detailed justification. - Your decision must be based on the specific findings and assessments from the Claim Reviewer, Risk Analyzer, and Policy Checker agents. - You must only approve if the claim is valid, risk is low or medium, and the policy covers the claim. - Say 'APPROVED' or 'DENIED' followed by your reasoning. - Format your response as a JSON object with 'decision' and 'justification' fields. - """ - ), - service=AzureChatCompletion( - deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"), - api_key=os.getenv("AZURE_OPENAI_KEY"), - endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), - ), - ) - - agents = [ - claim_reviewer_agent, - risk_analyzer_agent, - policy_checker_agent, - approver_agent - ] - - print("โœ… All specialized agents created/loaded successfully!") - return agents - -async def agent_response_callback(message: ChatMessageContent) -> None: - print(f"# {message.name}\n{message.content}") + # Create Claim Reviewer Agent with Cosmos DB access + print("๐Ÿ” Creating Claim Reviewer Agent...") + claim_reviewer_agent = chat_client.create_agent( + instructions="""You are an expert Insurance Claim Reviewer Agent specialized in analyzing and validating insurance claims. + Your primary responsibilities include: + 1. 
Use the get_document_by_claim_id function to retrieve claim data by claim_id, then: + 2. Review all claim details (dates, amounts, descriptions). + 3. Verify completeness of documentation and supporting evidence. + 4. Analyze damage assessments and cost estimates for reasonableness. + 5. Validate claim details against policy requirements. + 6. Identify inconsistencies, missing info, or red flags. + 7. Provide a detailed assessment with specific recommendations. + + **Response Format**: + A short paragraph description if the CLAIM STATUS is: VALID / QUESTIONABLE / INVALID ; Analysis: Summary of findings by component; Any missing Info / Concerns: List of issues or gaps; + Next Steps: Clear, actionable recommendations + """, + name="ClaimReviewer", + tools=[get_document_by_claim_id] + ) + + # Create Risk Analyzer Agent with Cosmos DB access + print("โš ๏ธ Creating Risk Analyzer Agent...") + risk_analyzer_agent = chat_client.create_agent( + instructions="""You are the Risk Analysis Agent. Your role is to evaluate the authenticity of insurance claims and detect potential fraud using available claim data. 
+ Core Functions: + - Analyze historical and current claim data + - Identify suspicious patterns, inconsistencies, or anomalies + - Detect fraud indicators + - Assess claim credibility and assign a risk score + - Recommend follow-up actions if warranted + + Assessment Guidelines: + - Use the get_document_by_claim_id function to access claim records + - Look for unusual timing, inconsistent descriptions, irregular amounts, or clustering + - Check for repeat claim behavior or geographic overlaps + - Assess the overall risk profile of each claim + + Output Format: + - Risk Level: LOW / MEDIUM / HIGH + - Risk Analysis: Brief summary of findings + - Indicators: List of specific fraud signals (if any) + - Risk Score: 1โ€“10 scale + - Recommendation: Investigate / Monitor / No action needed + """, + name="RiskAnalyzer", + tools=[get_document_by_claim_id] + ) + + # Create Policy Checker Agent + print("๐Ÿ“‹ Creating Policy Checker Agent...") + policy_checker_agent = chat_client.create_agent( + instructions="""You are the Policy Checker Agent. + + Your task is to summarize a policy based on policy number. + + Instructions: + - Do not analyze claim details directly. + - Use your search tool to locate policy documents by policy number or policy type. + - Identify relevant exclusions, limits, and deductibles. + - Base your determination only on the contents of the retrieved policy. + + Output Format: + - Policy Number: [Policy number] + - Main important details + - Reference and quote specific policy sections that support your determination. + """, + name="PolicyChecker", + ) + # Create Approver Agent for final decision + print("โœ… Creating Approver Agent...") + approver_agent = chat_client.create_agent( + instructions="""You must analyze and process insurance claims based on the information provided by specialized agents. + You will provide a final decision on whether to approve or deny the claim, along with a detailed justification. 
+ Your decision must be based on the specific findings and assessments from the Claim Reviewer, Risk Analyzer, and Policy Checker agents. + You must only approve if the claim is valid, risk is low or medium, and the policy covers the claim. + Say 'APPROVED' or 'DENIED' followed by your reasoning. + Format your response as a JSON object with 'decision' and 'justification' fields. + """, + name="ApproverAgent", + ) + + agents = { + 'claim_reviewer': claim_reviewer_agent, + 'risk_analyzer': risk_analyzer_agent, + 'policy_checker': policy_checker_agent, + 'approver': approver_agent, + 'chat_client': chat_client + } + + print("โœ… All specialized agents created successfully!") + return agents + async def run_insurance_claim_orchestration(claim_id: str, policy_number: str): - """Orchestrate multiple agents to process an insurance claim concurrently using only the claim ID.""" + """Orchestrate multiple agents to process an insurance claim concurrently using Microsoft Agent Framework.""" print(f"๐Ÿš€ Starting Concurrent Insurance Claim Processing Orchestration for claim ID: {claim_id} and policy number: {policy_number}") print(f"{'='*80}") @@ -125,40 +142,91 @@ async def run_insurance_claim_orchestration(claim_id: str, policy_number: str): # Create our specialized agents agents = await get_specialized_agents() - group_chat_orchestration = GroupChatOrchestration( - members=agents, - manager=RoundRobinGroupChatManager(max_rounds=4), - agent_response_callback=agent_response_callback, - ) - - # Create and start runtime - runtime = InProcessRuntime() - runtime.start() + # Create concurrent orchestration with the three analysis agents + workflow = ConcurrentBuilder().participants([ + agents['claim_reviewer'], + agents['risk_analyzer'], + agents['policy_checker'] + ]).build() try: # Create task that instructs agents to retrieve claim details first - task = f"""Analyze the insurance claim with ID: {claim_id} and policy number {policy_number} and come back with a decision on 
whether to approve or deny the claim.""" - # Invoke concurrent orchestration - orchestration_result = await group_chat_orchestration.invoke( - task=task, - runtime=runtime - ) + task = f"""Analyze the insurance claim with ID: {claim_id} and policy number {policy_number}. + +AGENT-SPECIFIC INSTRUCTIONS: + +Claim Reviewer Agent: +- Use get_document_by_claim_id("{claim_id}") to retrieve claim details +- Review all claim documentation and assess completeness +- Provide VALID/QUESTIONABLE/INVALID determination with detailed reasoning + +Risk Analyzer Agent: +- Use get_document_by_claim_id("{claim_id}") to retrieve claim data +- Analyze for fraud indicators and suspicious patterns +- Provide LOW/MEDIUM/HIGH risk assessment with specific evidence + +Policy Checker Agent: +- Search for policy documents using policy number: "{policy_number}" +- Identify relevant exclusions, limits, or deductibles +- Provide COVERED/NOT COVERED/PARTIAL COVERAGE determination + +Each agent must use their tools to retrieve and analyze actual data. +""" + + # Run concurrent orchestration + print(f"\n๐Ÿ”„ Invoking concurrent orchestration...") + events = await workflow.run(task) + + # Get outputs from the workflow + outputs = events.get_outputs() - # Get result - result = await orchestration_result.get(timeout=300) # 5 minute timeout + # Collect results from all agents + results = [] + if outputs: + for output in outputs: + # Output is a list of ChatMessage objects + messages = output if isinstance(output, list) else [output] + for msg in messages: + if hasattr(msg, 'text') and msg.text: + results.append(msg.text) + author = getattr(msg, 'author_name', 'Agent') + print(f"# {author} Response\n{msg.text}") + + # Now have the approver agent make final decision based on all analyses + print(f"\nโœ… Concurrent analysis complete. 
Running approver agent...") + + # Compile all analysis results for the approver + all_analyses = "\n\n".join([f"Agent Analysis:\n{result}" for result in results]) + + approver_task = f"""Based on the following analyses from specialized agents, provide a final decision on the insurance claim {claim_id}: + +{all_analyses} + +Provide your decision as a JSON object with 'decision' (APPROVED or DENIED) and 'justification' fields.""" + + # Create a separate workflow for the approver agent + approver_workflow = ConcurrentBuilder().participants([agents['approver']]).build() + approver_events = await approver_workflow.run(approver_task) + + # Get approver result + approver_outputs = approver_events.get_outputs() + approver_result = None + if approver_outputs: + for output in approver_outputs: + messages = output if isinstance(output, list) else [output] + for msg in messages: + if hasattr(msg, 'text') and msg.text: + approver_result = msg.text + break print(f"\nโœ… Insurance Claim Orchestration Complete!") - # print result - print(result) - return result + return approver_result if approver_result else all_analyses except Exception as e: print(f"โŒ Error during orchestration: {str(e)}") + import traceback + traceback.print_exc() raise - - finally: - await runtime.stop_when_idle() - print(f"\n๐Ÿงน Orchestration cleanup complete.") def _normalize_orchestration_result(result: Any) -> Dict[str, Any]: """Normalize whatever the orchestration returns into a simple dict. 
diff --git a/challenge-5/orchestration.ipynb b/challenge-5/orchestration.ipynb index 8a4fe16..6533d4d 100644 --- a/challenge-5/orchestration.ipynb +++ b/challenge-5/orchestration.ipynb @@ -5,72 +5,70 @@ "id": "7a4c892e", "metadata": {}, "source": [ - "# ๐Ÿ“‹ Multi-Agent Insurance Claim Orchestration\n", + "# \ud83d\udccb Multi-Agent Insurance Claim Orchestration\n", "\n", "## Overview\n", - "The `orchestration.ipynb` notebook implements a **sophisticated multi-agent orchestration system** for insurance claim processing using **Microsoft Semantic Kernel** and **Azure AI Agent Service**. This notebook demonstrates advanced concurrent agent coordination to analyze insurance claims from multiple specialized perspectives simultaneously.\n", + "The `orchestration.ipynb` notebook implements a **sophisticated multi-agent orchestration system** for insurance claim processing using **Microsoft Agent Framework** and **Azure OpenAI**. This notebook demonstrates advanced concurrent agent coordination to analyze insurance claims from multiple specialized perspectives simultaneously.\n", "\n", - "## ๐Ÿ—๏ธ Architecture & Components\n", + "## \ud83c\udfd7\ufe0f Architecture & Components\n", "\n", "### Core Technologies\n", - "- **Microsoft Semantic Kernel**: Agent orchestration framework\n", - "- **Azure AI Agent Service**: Cloud-based agent hosting and management\n", - "- **Concurrent Orchestration**: Parallel execution of multiple AI agents\n", + "- **Microsoft Agent Framework**: Modern agent orchestration framework for concurrent execution\n", + "- **Azure OpenAI**: Cloud-based AI model hosting and inference\n", + "- **Concurrent Orchestration**: Parallel execution of multiple AI agents using ConcurrentBuilder\n", "- **Azure Cosmos DB Integration**: Real-time data access through custom plugins\n", "- **Azure Identity**: Secure authentication for Azure services\n", "\n", "### Agent Specializations\n", "The system creates three specialized AI agents that work concurrently:\n", 
"\n", - "1. **๐Ÿ” Claim Reviewer Agent**\n", + "1. **\ud83d\udd0d Claim Reviewer Agent**\n", " - Validates claim documentation completeness\n", " - Analyzes damage assessments and cost estimates\n", " - Identifies inconsistencies or missing information\n", " - Provides VALID/QUESTIONABLE/INVALID determinations\n", " - Equipped with Cosmos DB plugin for data retrieval\n", "\n", - "2. **โš ๏ธ Risk Analyzer Agent**\n", + "2. **\u26a0\ufe0f Risk Analyzer Agent**\n", " - Detects fraud patterns and suspicious indicators\n", " - Assesses claim authenticity and credibility\n", " - Analyzes timing, amounts, and circumstances\n", " - Provides LOW/MEDIUM/HIGH risk assessments\n", " - Equipped with Cosmos DB plugin for historical analysis\n", "\n", - "3. **๐Ÿ“‹ Policy Checker Agent**\n", + "3. **\ud83d\udccb Policy Checker Agent**\n", " - Validates coverage against policy terms\n", " - Interprets limits, deductibles, and exclusions\n", " - Handles multiple policy types (Auto, Commercial, Motorcycle, etc.)\n", " - Provides COVERED/NOT COVERED/PARTIAL COVERAGE determinations\n", " - Uses Azure AI Search for policy document analysis\n", "\n", - "## ๐Ÿ”ง Key Functions\n", + "## \ud83d\udd27 Key Functions\n", "\n", "### `create_specialized_agents()`\n", - "- Establishes Azure AI Agent client connections\n", + "- Creates Azure OpenAI chat client with proper authentication\n", "- Configures each agent with specialized instructions and capabilities\n", "- Sets up Cosmos DB plugins for data-enabled agents\n", - "- Configures polling options for optimal performance\n", "- Returns a collection of ready-to-use specialized agents\n", "\n", "### `run_insurance_claim_orchestration()`\n", - "- **Concurrent Processing**: All three agents analyze claims simultaneously\n", + "- **Concurrent Processing**: All three agents analyze claims simultaneously using ConcurrentBuilder\n", "- **Intelligent Task Distribution**: Each agent receives specialized instructions\n", "- **Real-time Data Access**: 
Agents can retrieve claim data using provided claim IDs\n", "- **Comprehensive Reporting**: Generates unified analysis reports combining all agent outputs\n", "- **Error Handling**: Robust exception management and resource cleanup\n", "- **Progress Tracking**: Detailed logging of orchestration stages\n", "\n", - "## ๐Ÿš€ Orchestration Flow\n", + "## \ud83d\ude80 Orchestration Flow\n", "\n", "1. **Agent Creation**: Initializes three specialized insurance processing agents\n", - "2. **Concurrent Orchestration Setup**: Creates parallel execution framework\n", + "2. **Concurrent Workflow Setup**: Creates parallel execution framework using ConcurrentBuilder\n", "3. **Task Distribution**: Assigns specialized analysis tasks to each agent\n", "4. **Parallel Execution**: All agents work simultaneously on their respective analyses\n", - "5. **Result Aggregation**: Collects and consolidates outputs from all agents\n", + "5. **Result Aggregation**: Collects and consolidates outputs from all agents via event stream\n", "6. **Report Generation**: Creates comprehensive analysis report with all findings\n", - "7. 
**Resource Cleanup**: Properly terminates runtime and releases resources\n", "\n", - "## ๐Ÿ“Š Output Format\n", + "## \ud83d\udcca Output Format\n", "\n", "The system generates a comprehensive **Insurance Claim Analysis Report** that includes:\n", "\n", @@ -80,20 +78,20 @@ "- **Policy Coverage Determination**: Coverage eligibility and terms validation\n", "- **Unified Recommendations**: Consolidated next steps based on all agent analyses\n", "\n", - "## ๐Ÿ’ก Advanced Features\n", + "## \ud83d\udca1 Advanced Features\n", "\n", - "- **Concurrent Execution**: True parallelism for faster processing\n", - "- **Database Integration**: Real-time access to claim and policy data\n", + "- **True Concurrent Execution**: Leverages Microsoft Agent Framework's ConcurrentBuilder for efficient parallelism\n", + "- **Database Integration**: Real-time access to claim and policy data through plugins\n", "- **Flexible Configuration**: Environment-based model and endpoint configuration\n", "- **Comprehensive Logging**: Detailed progress tracking and status updates\n", - "- **Timeout Management**: 5-minute timeout for orchestration completion\n", + "- **Event-based Results**: Streams results as they become available from each agent\n", "- **Error Recovery**: Graceful handling of individual agent failures\n", "\n", - "## ๐ŸŽฏ Use Case Example\n", + "## \ud83c\udfaf Use Case Example\n", "\n", - "The notebook includes a practical example demonstrating the orchestration of claim analysis for \"CL002\", showing how the system processes claim details and retrieves additional data to provide comprehensive multi-agent analysis.\n", + "The notebook includes a practical example demonstrating the orchestration of claim analysis for a specific claim ID and policy number, showing how the system processes claim details and retrieves additional data to provide comprehensive multi-agent analysis.\n", "\n", - "This implementation represents a cutting-edge approach to insurance claim processing, 
leveraging the power of concurrent AI agents to provide thorough, multi-perspective analysis that would traditionally require multiple human experts working in sequence.\n" + "This implementation represents a cutting-edge approach to insurance claim processing, leveraging the power of Microsoft Agent Framework's concurrent orchestration to provide thorough, multi-perspective analysis that would traditionally require multiple human experts working in sequence.\n" ] }, { @@ -103,257 +101,227 @@ "metadata": {}, "outputs": [], "source": [ - "# Import necessary libraries for Semantic Kernel orchestration\n", - "import asyncio\n", - "import os\n", - "from typing import Dict, Any\n", - "from datetime import timedelta\n", - "from azure.identity.aio import DefaultAzureCredential\n", - "from semantic_kernel.agents import (\n", - " AzureAIAgent, \n", - " ConcurrentOrchestration\n", - ")\n", - "from semantic_kernel.agents.runtime import InProcessRuntime\n", - "from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions\n", - "from azure.ai.agents.models import AzureAISearchQueryType, AzureAISearchTool, ListSortOrder, MessageRole\n", - "from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread\n", - "from azure.identity import AzureCliCredential # async credential\n", - "\n", - "# Import the Cosmos DB plugin\n", - "from agents.tools import CosmosDBPlugin\n", - "from dotenv import load_dotenv\n", - "\n", - "load_dotenv(override=True) # This forces a reload of the .env file\n", - "\n", - "\n", - "async def create_specialized_agents():\n", - " \"\"\"Create our specialized insurance processing agents using Semantic Kernel.\"\"\"\n", - " \n", - " print(\"๐Ÿ”ง Creating specialized insurance agents...\")\n", - " \n", - " # Create Cosmos DB plugin instances for different agents\n", - " cosmos_plugin_claims = CosmosDBPlugin()\n", - " cosmos_plugin_risk = CosmosDBPlugin()\n", - " \n", - " # Get environment variables\n", - " endpoint = 
os.environ.get(\"AI_FOUNDRY_PROJECT_ENDPOINT\")\n", - " model_deployment = os.environ.get(\"MODEL_DEPLOYMENT_NAME\", \"gpt-4.1-mini\")\n", - " \n", - " agents = {}\n", - " \n", - " async with DefaultAzureCredential() as creds:\n", - " client = AzureAIAgent.create_client(credential=creds, endpoint=endpoint)\n", - " \n", - " # Create Claim Reviewer Agent with Cosmos DB access\n", - " print(\"๐Ÿ” Creating Claim Reviewer Agent...\")\n", - " claim_reviewer_definition = await client.agents.create_agent(\n", - " model=model_deployment,\n", - " name=\"ClaimReviewer\",\n", - " description=\"Expert Insurance Claim Reviewer Agent specialized in analyzing and validating insurance claims\",\n", - " instructions=\"\"\"You are an expert Insurance Claim Reviewer Agent specialized in analyzing and validating insurance claims. \n", - " Your primary responsibilities include:\n", - " 1. Use the Cosmos DB plugin to retrieve claim data by claim_id, then:\n", - " 2.Review all claim details (dates, amounts, descriptions).\n", - " 3. Verify completeness of documentation and supporting evidence.\n", - " 4. Analyze damage assessments and cost estimates for reasonableness.\n", - " 5. Validate claim details against policy requirements.\n", - " 6. Identify inconsistencies, missing info, or red flags.\n", - " 7. 
Provide a detailed assessment with specific recommendations.\n", - "\n", - " **Response Format**:\n", - "\n", - " A short paragraph description if the CLAIM STATUS is: VALID / QUESTIONABLE / INVALID ; Analysis: Summary of findings by component; Any missing Info / Concerns: List of issues or gaps;\n", - " Next Steps: Clear, actionable recommendations\n", - " \"\"\"\n", - " )\n", - " \n", - " claim_reviewer_agent = AzureAIAgent(\n", - " client=client,\n", - " definition=claim_reviewer_definition,\n", - " plugins=[cosmos_plugin_claims]\n", - " )\n", - " \n", - " # Create Risk Analyzer Agent with Cosmos DB access\n", - " print(\"โš ๏ธ Creating Risk Analyzer Agent...\")\n", - " risk_analyzer_definition = await client.agents.create_agent(\n", - " model=model_deployment,\n", - " name=\"RiskAnalyzer\",\n", - " instructions=\"\"\"You are the Risk Analysis Agent. Your role is to evaluate the authenticity of insurance claims and detect potential fraud using available claim data.\n", - " Core Functions:\n", - " - Analyze historical and current claim data\n", - " - Identify suspicious patterns, inconsistencies, or anomalies\n", - " - Detect fraud indicators\n", - " - Assess claim credibility and assign a risk score\n", - " - Recommend follow-up actions if warranted\n", - "\n", - " Assessment Guidelines:\n", - " - Use the Cosmos DB plugin to access claim records\n", - " - Look for unusual timing, inconsistent descriptions, irregular amounts, or clustering\n", - " - Check for repeat claim behavior or geographic overlaps\n", - " - Assess the overall risk profile of each claim\n", - "\n", - " Fraud Indicators to Watch For:\n", - " - Claims with irregular timing\n", - " - Contradictory or vague damage descriptions\n", - " - Unusual or repetitive claim amounts\n", - " - Multiple recent claims under same or related profiles\n", - " - Geographic or temporal clustering of incidents\n", - "\n", - " Output Format:\n", - " - Risk Level: LOW / MEDIUM / HIGH\n", - " - Risk Analysis: Brief 
summary of findings\n", - " - Indicators: List of specific fraud signals (if any)\n", - " - Risk Score: 1โ€“10 scale\n", - " - Recommendation: Investigate / Monitor / No action needed\n", - "\n", - " Base all assessments strictly on the available claim data. Use structured reasoning and avoid assumptions beyond the data.\n", - " \"\"\",\n", - " )\n", - " \n", - " risk_analyzer_agent = AzureAIAgent(\n", - " client=client,\n", - " definition=risk_analyzer_definition,\n", - " plugins=[cosmos_plugin_risk]\n", - " )\n", - " \n", - " ai_agent_settings = AzureAIAgentSettings(model_deployment_name= os.environ.get(\"MODEL_DEPLOYMENT_NAME\"), azure_ai_search_connection_id=os.environ.get(\"AZURE_AI_AGENT_ENDPOINT\")) \n", - " ai_search = AzureAISearchTool(\n", - " index_connection_id=os.environ.get(\"AZURE_AI_CONNECTION_ID\"), \n", - " index_name=\"insurance-documents-index\"\n", - " )\n", - "\n", - " # Create agent definition\n", - " policy_agent_definition = await client.agents.create_agent(\n", - " name=\"PolicyChecker\", \n", - " model=os.environ.get(\"MODEL_DEPLOYMENT_NAME\"),\n", - " instructions=\"\"\"\"\n", - " You are the Policy Checker Agent.\n", - "\n", - " Your task is to summarize a policy based on policy number.\n", - "\n", - " Instructions:\n", - " - Do not analyze claim details directly.\n", - " - Use your search tool to locate policy documents by policy number or policy type.\n", - " - Identify relevant exclusions, limits, and deductibles.\n", - " - Base your determination only on the contents of the retrieved policy.\n", - "\n", - " Output Format:\n", - " - Policy Number: [Policy number]\n", - " - Main important details\n", - " - Reference and quote specific policy sections that support your determination.\n", - " - Clearly explain how the policy language leads to your conclusion.\n", - "\n", - " Be precise, objective, and rely solely on the policy content.\n", - " \"\"\",\n", - " tools=ai_search.definitions,\n", - " tool_resources=ai_search.resources,\n", - 
" headers={\"x-ms-enable-preview\": \"true\"},\n", - " )\n", - "\n", - " policy_checker_agent = AzureAIAgent(\n", - " client=client, \n", - " definition=policy_agent_definition\n", - " )\n", - "\n", - " agents = {\n", - " 'claim_reviewer': claim_reviewer_agent,\n", - " 'risk_analyzer': risk_analyzer_agent,\n", - " 'policy_checker': policy_checker_agent\n", - " }\n", - " \n", - " print(\"โœ… All specialized agents created/loaded successfully!\")\n", - " return agents, client\n", - "\n", - "async def run_insurance_claim_orchestration(claim_id: str, policy_number: str):\n", - " \"\"\"Orchestrate multiple agents to process an insurance claim concurrently using only the claim ID.\"\"\"\n", - " \n", - " print(f\"๐Ÿš€ Starting Concurrent Insurance Claim Processing Orchestration\")\n", - " print(f\"{'='*80}\")\n", - " \n", - " # Create our specialized agents\n", - " agents, client = await create_specialized_agents()\n", - " \n", - " # Create concurrent orchestration with all three agents\n", - " orchestration = ConcurrentOrchestration(\n", - " members=[agents['claim_reviewer'], agents['risk_analyzer'], agents['policy_checker']]\n", - " )\n", - " \n", - " # Create and start runtime\n", - " runtime = InProcessRuntime()\n", - " runtime.start()\n", - " \n", - " try: \n", - " # Create task that instructs agents to retrieve claim details first\n", - " task = f\"\"\"Analyze the insurance claim with ID: {claim_id} or the policy number {policy_number} and come back with a critical solution for if the credit should be approved.\n", - "\n", - "CRITICAL: ALL AGENTS MUST USE THEIR AVAILABLE TOOLS TO RETRIEVE INFORMATION\n", - "\n", - "AGENT-SPECIFIC INSTRUCTIONS:\n", - "\n", - "Claim Reviewer Agent: \n", - "- MUST USE: get_document_by_claim_id(\"{claim_id}\") to retrieve claim details\n", - "- Review all claim documentation and assess completeness\n", - "- Validate damage estimates and repair costs against retrieved data\n", - "- Check for proper evidence and documentation in the claim 
data\n", - "- Cross-reference claim amounts with industry standards\n", - "- Provide VALID/QUESTIONABLE/INVALID determination with detailed reasoning\n", - "\n", - "Risk Analyzer Agent:\n", - "- MUST USE: get_document_by_claim_id(\"{claim_id}\") to retrieve claim data\n", - "- Analyze the retrieved data for fraud indicators and suspicious patterns\n", - "- Assess claim authenticity and credibility based on actual claim details\n", - "- Check for unusual timing, amounts, or circumstances in the data\n", - "- Look for inconsistencies between different parts of the claim\n", - "- Provide LOW/MEDIUM/HIGH risk assessment with specific evidence\n", - "\n", - "Policy Checker Agent (policy_checker_agent):\n", - "- YOU DO NOT NEED TO LOOK INTO CLAIMS!\n", - "- MUST USE: Your search capabilities to find relevant policy documents by policy number (\"{policy_number}\") or type found in the claim data\n", - "- Search for policy documents using policy numbers\n", - "- Identify relevant exclusions, limits, or deductibles from actual policy documents\n", - "- Provide COVERED/NOT COVERED/PARTIAL COVERAGE determination with policy references\n", - "- Quote specific policy sections that support your determination\n", - "\n", - "IMPORTANT: Each agent MUST actively use their tools to retrieve and analyze actual data. 
\n", - "Do not provide generic responses - base your analysis on the specific claim data and policy documents retrieved through your tools.\n", - "\"\"\"\n", - " # Invoke concurrent orchestration\n", - " orchestration_result = await orchestration.invoke(\n", - " task=task,\n", - " runtime=runtime\n", - " )\n", - " \n", - " # Get results from all agents\n", - " results = await orchestration_result.get(timeout=300) # 5 minute timeout\n", - " \n", - " print(f\"\\n๐ŸŽ‰ All agents completed their analysis!\")\n", - " print(f\"{'โ”€'*60}\")\n", - " \n", - " # Display individual results\n", - " for i, result in enumerate(results, 1):\n", - " agent_name = result.name if hasattr(result, 'name') else f\"Agent {i}\"\n", - " content = str(result.content)\n", - " print(f\"\\n๐Ÿค– {agent_name} Analysis:\")\n", - " print(f\"{'โ”€'*40}\")\n", - " print(content)\n", - " \n", - " # Create comprehensive analysis report\n", - " comprehensive_analysis = f\"\"\"\n", - "\n", - "{chr(10).join([f\"### {result.name} Assessment:{chr(10)}{chr(10)}{result.content}{chr(10)}\" for result in results])}\n", - "\n", - "\"\"\"\n", - " \n", - " print(f\"\\nโœ… Concurrent Insurance Claim Orchestration Complete!\")\n", - " return comprehensive_analysis\n", - " \n", + "# Import necessary libraries for Microsoft Agent Framework orchestration", + "import asyncio", + "import os", + "from typing import Dict, Any", + "from azure.identity import DefaultAzureCredential, AzureCliCredential", + "from agent_framework import ChatMessage, ConcurrentBuilder", + "from agent_framework.azure import AzureOpenAIChatClient", + "", + "# Import the Cosmos DB tools", + "from agents.cosmos_tools import get_document_by_claim_id", + "from dotenv import load_dotenv", + "", + "load_dotenv(override=True) # This forces a reload of the .env file", + "", + "", + "async def create_specialized_agents():", + " \"\"\"Create our specialized insurance processing agents using Microsoft Agent Framework.\"\"\"", + " ", + " 
print(\"\ud83d\udd27 Creating specialized insurance agents...\")", + " ", + " # Get environment variables", + " # For Agent Framework, we use AzureOpenAIChatClient which connects to Azure OpenAI", + " # Try to use DefaultAzureCredential first, fall back to AzureCliCredential", + " try:", + " credential = DefaultAzureCredential()", " except Exception as e:\n", - " print(f\"โŒ Error during orchestration: {str(e)}\")\n", - " raise\n", - " \n", - " finally:\n", - " await runtime.stop_when_idle()\n", - " print(f\"\\n๐Ÿงน Orchestration cleanup complete.\")" + " print(f\"\u26a0\ufe0f DefaultAzureCredential failed: {str(e)}, falling back to AzureCliCredential\")\n", + " credential = AzureCliCredential()", + " ", + " # Create Azure OpenAI chat client", + " # Agent Framework uses environment variables or explicit configuration", + " chat_client = AzureOpenAIChatClient(credential=credential)", + " ", + " agents = {}", + " ", + " # Create Claim Reviewer Agent with Cosmos DB access", + " print(\"\ud83d\udd0d Creating Claim Reviewer Agent...\")", + " claim_reviewer_agent = chat_client.create_agent(", + " instructions=\"\"\"You are an expert Insurance Claim Reviewer Agent specialized in analyzing and validating insurance claims. ", + " Your primary responsibilities include:", + " 1. Use the get_document_by_claim_id function to retrieve claim data by claim_id, then:", + " 2. Review all claim details (dates, amounts, descriptions).", + " 3. Verify completeness of documentation and supporting evidence.", + " 4. Analyze damage assessments and cost estimates for reasonableness.", + " 5. Validate claim details against policy requirements.", + " 6. Identify inconsistencies, missing info, or red flags.", + " 7. 
Provide a detailed assessment with specific recommendations.", + "", + " **Response Format**:", + " A short paragraph description if the CLAIM STATUS is: VALID / QUESTIONABLE / INVALID ; Analysis: Summary of findings by component; Any missing Info / Concerns: List of issues or gaps;", + " Next Steps: Clear, actionable recommendations", + " \"\"\",", + " name=\"ClaimReviewer\",", + " tools=[get_document_by_claim_id]", + " )", + " ", + " # Create Risk Analyzer Agent with Cosmos DB access", + " print(\"\u26a0\ufe0f Creating Risk Analyzer Agent...\")", + " risk_analyzer_agent = chat_client.create_agent(", + " instructions=\"\"\"You are the Risk Analysis Agent. Your role is to evaluate the authenticity of insurance claims and detect potential fraud using available claim data.", + " Core Functions:", + " - Analyze historical and current claim data", + " - Identify suspicious patterns, inconsistencies, or anomalies", + " - Detect fraud indicators", + " - Assess claim credibility and assign a risk score", + " - Recommend follow-up actions if warranted", + "", + " Assessment Guidelines:", + " - Use the get_document_by_claim_id function to access claim records", + " - Look for unusual timing, inconsistent descriptions, irregular amounts, or clustering", + " - Check for repeat claim behavior or geographic overlaps", + " - Assess the overall risk profile of each claim", + "", + " Fraud Indicators to Watch For:", + " - Claims with irregular timing", + " - Contradictory or vague damage descriptions", + " - Unusual or repetitive claim amounts", + " - Multiple recent claims under same or related profiles", + " - Geographic or temporal clustering of incidents", + "", + " Output Format:", + " - Risk Level: LOW / MEDIUM / HIGH", + " - Risk Analysis: Brief summary of findings", + " - Indicators: List of specific fraud signals (if any)", + " - Risk Score: 1\u201310 scale", + " - Recommendation: Investigate / Monitor / No action needed", + "", + " Base all assessments strictly on the 
available claim data. Use structured reasoning and avoid assumptions beyond the data.", + " \"\"\",", + " name=\"RiskAnalyzer\",", + " tools=[get_document_by_claim_id]", + " )", + " ", + " # Create Policy Checker Agent", + " print(\"\ud83d\udccb Creating Policy Checker Agent...\")", + " policy_checker_agent = chat_client.create_agent(", + " instructions=\"\"\"You are the Policy Checker Agent.", + "", + " Your task is to summarize a policy based on policy number.", + "", + " Instructions:", + " - Do not analyze claim details directly.", + " - Use your search tool to locate policy documents by policy number or policy type.", + " - Identify relevant exclusions, limits, and deductibles.", + " - Base your determination only on the contents of the retrieved policy.", + "", + " Output Format:", + " - Policy Number: [Policy number]", + " - Main important details", + " - Reference and quote specific policy sections that support your determination.", + " - Clearly explain how the policy language leads to your conclusion.", + "", + " Be precise, objective, and rely solely on the policy content.", + " \"\"\",", + " name=\"PolicyChecker\",", + " )", + " ", + " agents = {", + " 'claim_reviewer': claim_reviewer_agent,", + " 'risk_analyzer': risk_analyzer_agent,", + " 'policy_checker': policy_checker_agent", + " }", + " ", + " print(\"\u2705 All specialized agents created successfully!\")", + " return agents, chat_client", + "", + "async def run_insurance_claim_orchestration(claim_id: str, policy_number: str):", + " \"\"\"Orchestrate multiple agents to process an insurance claim concurrently using Microsoft Agent Framework.\"\"\"", + " ", + " print(f\"\ud83d\ude80 Starting Concurrent Insurance Claim Processing Orchestration\")", + " print(f\"{'='*80}\")", + " ", + " # Create our specialized agents", + " agents, chat_client = await create_specialized_agents()", + " ", + " # Create concurrent orchestration with all three agents", + " workflow = ConcurrentBuilder().participants([", + 
" agents['claim_reviewer'],", + " agents['risk_analyzer'],", + " agents['policy_checker']", + " ]).build()", + " ", + " try: ", + " # Create task that instructs agents to retrieve claim details first", + " task = f\"\"\"Analyze the insurance claim with ID: {claim_id} or the policy number {policy_number} and come back with a critical solution for if the claim should be approved.", + "", + "CRITICAL: ALL AGENTS MUST USE THEIR AVAILABLE TOOLS TO RETRIEVE INFORMATION", + "", + "AGENT-SPECIFIC INSTRUCTIONS:", + "", + "Claim Reviewer Agent: ", + "- MUST USE: get_document_by_claim_id(\"{claim_id}\") to retrieve claim details", + "- Review all claim documentation and assess completeness", + "- Validate damage estimates and repair costs against retrieved data", + "- Check for proper evidence and documentation in the claim data", + "- Cross-reference claim amounts with industry standards", + "- Provide VALID/QUESTIONABLE/INVALID determination with detailed reasoning", + "", + "Risk Analyzer Agent:", + "- MUST USE: get_document_by_claim_id(\"{claim_id}\") to retrieve claim data", + "- Analyze the retrieved data for fraud indicators and suspicious patterns", + "- Assess claim authenticity and credibility based on actual claim details", + "- Check for unusual timing, amounts, or circumstances in the data", + "- Look for inconsistencies between different parts of the claim", + "- Provide LOW/MEDIUM/HIGH risk assessment with specific evidence", + "", + "Policy Checker Agent (policy_checker_agent):", + "- YOU DO NOT NEED TO LOOK INTO CLAIMS!", + "- MUST USE: Your search capabilities to find relevant policy documents by policy number (\"{policy_number}\") or type found in the claim data", + "- Search for policy documents using policy numbers", + "- Identify relevant exclusions, limits, or deductibles from actual policy documents", + "- Provide COVERED/NOT COVERED/PARTIAL COVERAGE determination with policy references", + "- Quote specific policy sections that support your 
determination", + "", + "IMPORTANT: Each agent MUST actively use their tools to retrieve and analyze actual data. ", + "Do not provide generic responses - base your analysis on the specific claim data and policy documents retrieved through your tools.", + "\"\"\"", + " ", + " # Run the concurrent orchestration", + " print(f\"\\n\ud83d\udd04 Invoking concurrent orchestration...\")", + " events = await workflow.run(task)", + " ", + " print(f\"\\n\ud83c\udf89 All agents completed their analysis!\")", + " print(f\"{'\u2500'*60}\")", + " ", + " # Get outputs from the workflow", + " outputs = events.get_outputs()", + " ", + " # Collect results from all agents", + " results = []", + " if outputs:", + " for output in outputs:", + " # Output is a list of ChatMessage objects", + " messages = output if isinstance(output, list) else [output]", + " for msg in messages:", + " if hasattr(msg, 'text') and msg.text:", + " results.append(msg.text)", + " author = getattr(msg, 'author_name', 'Agent')", + " print(f\"\\n\ud83e\udd16 {author} Analysis:\")", + " print(f\"{'\u2500'*40}\")", + " print(msg.text)", + " ", + " # Create comprehensive analysis report", + " comprehensive_analysis = f\"\"\"", + "", + "{chr(10).join([f\"### Agent Assessment:{chr(10)}{chr(10)}{result}{chr(10)}\" for result in results])}", + "", + "\"\"\"", + " ", + " print(f\"\\n\u2705 Concurrent Insurance Claim Orchestration Complete!\")", + " return comprehensive_analysis", + " ", + " except Exception as e:", + " print(f\"\u274c Error during orchestration: {str(e)}\")", + " import traceback", + " traceback.print_exc()", + " raise", + " ", + " finally:", + " print(f\"\\n\ud83e\uddf9 Orchestration cleanup complete.\")", + "" ] }, { @@ -390,4 +358,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/challenge-5/requirements.txt b/challenge-5/requirements.txt index 7cf7fe0..7785456 100644 --- a/challenge-5/requirements.txt +++ b/challenge-5/requirements.txt @@ -119,7 +119,8 @@ 
ruamel.yaml==0.18.14 ruamel.yaml.clib==0.2.12 scikit-learn==1.7.1 scipy==1.16.0 -semantic-kernel==1.36.0 +agent-framework>=1.0.0b1 +agent-framework-azure-ai>=1.0.0b1 six==1.17.0 sniffio==1.3.1 sse-starlette==2.4.1 @@ -144,4 +145,5 @@ azure-ai-evaluation==1.9.0 azure-functions azure-cosmos azure-identity -semantic-kernel==1.36.0 \ No newline at end of file +agent-framework>=1.0.0b1 +agent-framework-azure-ai>=1.0.0b1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index d71ae43..35dbfc7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,8 @@ azure-ai-projects==1.0.0b12 azure-common==1.1.28 azure-core==1.35.0 azure-identity==1.23.1 +agent-framework>=1.0.0b1 +agent-framework-azure-ai>=1.0.0b1 azure-search==1.0.0b2 azure-search-documents==11.5.3 azure-storage-blob==12.25.1