diff --git a/.env.example b/.env.example index 044e715..a20ea92 100644 --- a/.env.example +++ b/.env.example @@ -1,5 +1,5 @@ AGENTUITY_SDK_KEY= -AGENTUITY_PROJECT_KEY= +AGENTUITY_PROJECT_KEY= # example-composio # COMPOSIO_API_KEY= @@ -11,5 +11,24 @@ AGENTUITY_PROJECT_KEY= # SLACK_SIGNING_SECRET= # SLACK_BOT_TOKEN= +# example-telegram +# TELEGRAM_BOT_TOKEN= + +# example-teams +# TEAMS_BOT_APP_ID= +# TEAMS_BOT_APP_PASSWORD= +# TEAMS_BOT_TENANT_ID= +# TEAMS_TEST_USER_KEY= # Get this by chatting with the bot, then check logs for "userKey" field + # gateway-byo-token - Only use if you want to bypass the AI gateway -# ANTHROPIC_API_KEY= \ No newline at end of file +# ANTHROPIC_API_KEY= + +# io-email - Email address for the io-email agent (configure in Agentuity Console) +# IO_EMAIL_ADDRESS= + +# Checkly Heartbeat URLs - Create monitors at https://app.checklyhq.com +# CHECKLY_TEST_SUITE_URL= +# CHECKLY_EXAMPLE_SLACK_URL= +# CHECKLY_EXAMPLE_TEAMS_URL= +# CHECKLY_IO_EMAIL_URL= +# CHECKLY_IO_SMS_URL= \ No newline at end of file diff --git a/.github/example-teams/teams-setup-1-azure-home.png b/.github/example-teams/teams-setup-1-azure-home.png new file mode 100644 index 0000000..ef526cd Binary files /dev/null and b/.github/example-teams/teams-setup-1-azure-home.png differ diff --git a/.github/example-teams/teams-setup-10-import-app.png b/.github/example-teams/teams-setup-10-import-app.png new file mode 100644 index 0000000..0cf8d9f Binary files /dev/null and b/.github/example-teams/teams-setup-10-import-app.png differ diff --git a/.github/example-teams/teams-setup-11-teams-upload.png b/.github/example-teams/teams-setup-11-teams-upload.png new file mode 100644 index 0000000..eec10f5 Binary files /dev/null and b/.github/example-teams/teams-setup-11-teams-upload.png differ diff --git a/.github/example-teams/teams-setup-12-kv-storage.png b/.github/example-teams/teams-setup-12-kv-storage.png new file mode 100644 index 0000000..f5669f8 Binary files /dev/null and 
b/.github/example-teams/teams-setup-12-kv-storage.png differ diff --git a/.github/example-teams/teams-setup-2-categories.png b/.github/example-teams/teams-setup-2-categories.png new file mode 100644 index 0000000..e884d65 Binary files /dev/null and b/.github/example-teams/teams-setup-2-categories.png differ diff --git a/.github/example-teams/teams-setup-3-create-bot.png b/.github/example-teams/teams-setup-3-create-bot.png new file mode 100644 index 0000000..c5cf4b6 Binary files /dev/null and b/.github/example-teams/teams-setup-3-create-bot.png differ diff --git a/.github/example-teams/teams-setup-4-bot-credentials.png b/.github/example-teams/teams-setup-4-bot-credentials.png new file mode 100644 index 0000000..efe6769 Binary files /dev/null and b/.github/example-teams/teams-setup-4-bot-credentials.png differ diff --git a/.github/example-teams/teams-setup-5-channels.png b/.github/example-teams/teams-setup-5-channels.png new file mode 100644 index 0000000..4b105d0 Binary files /dev/null and b/.github/example-teams/teams-setup-5-channels.png differ diff --git a/.github/example-teams/teams-setup-6-channels-details.png b/.github/example-teams/teams-setup-6-channels-details.png new file mode 100644 index 0000000..ac6f2ef Binary files /dev/null and b/.github/example-teams/teams-setup-6-channels-details.png differ diff --git a/.github/example-teams/teams-setup-7-messaging-endpoint.png b/.github/example-teams/teams-setup-7-messaging-endpoint.png new file mode 100644 index 0000000..fe9be94 Binary files /dev/null and b/.github/example-teams/teams-setup-7-messaging-endpoint.png differ diff --git a/.github/example-teams/teams-setup-8-test-webchat.png b/.github/example-teams/teams-setup-8-test-webchat.png new file mode 100644 index 0000000..e3c35c8 Binary files /dev/null and b/.github/example-teams/teams-setup-8-test-webchat.png differ diff --git a/.github/example-teams/teams-setup-9-validate-manifest.png b/.github/example-teams/teams-setup-9-validate-manifest.png new file mode 
100644 index 0000000..bb83b5a Binary files /dev/null and b/.github/example-teams/teams-setup-9-validate-manifest.png differ diff --git a/.github/example-telegram/telegram-setup-1-api-endpoint.png b/.github/example-telegram/telegram-setup-1-api-endpoint.png new file mode 100644 index 0000000..a3ca186 Binary files /dev/null and b/.github/example-telegram/telegram-setup-1-api-endpoint.png differ diff --git a/.github/example-telegram/telegram-setup-2-bot-father.png b/.github/example-telegram/telegram-setup-2-bot-father.png new file mode 100644 index 0000000..505c744 Binary files /dev/null and b/.github/example-telegram/telegram-setup-2-bot-father.png differ diff --git a/.github/example-telegram/telegram-setup-3-token.png b/.github/example-telegram/telegram-setup-3-token.png new file mode 100644 index 0000000..b1480c7 Binary files /dev/null and b/.github/example-telegram/telegram-setup-3-token.png differ diff --git a/README.md b/README.md index 9ad084b..c81e60b 100644 --- a/README.md +++ b/README.md @@ -94,7 +94,11 @@ Each agent demonstrates specific Agentuity features. 
Here's what you can explore | **example-chat** | Conversational AI with persistent chat history | | **example-composio** | Integration with Composio tools (Hacker News) | | **example-discord** | Discord webhook notifications | +| **example-llm-judge** | LLM-as-a-judge pattern for evaluating AI outputs | | **example-slack** | Slack bot integration with thread support | +| **example-streaming** | Real-time data streaming | +| **example-teams** | Microsoft Teams bot integration with persistent chat history | +| **example-telegram** | Telegram bot integration | ## How to Use in DevMode diff --git a/agentuity.yaml b/agentuity.yaml index 952d410..010f135 100644 --- a/agentuity.yaml +++ b/agentuity.yaml @@ -50,11 +50,11 @@ deployment: # You should tune the resources for the deployment resources: # The memory requirements - memory: 2Gi + memory: 4Gi # The CPU requirements - cpu: 2000M + cpu: 4000M # The disk size requirements - disk: 250Mi + disk: 3Gi # The deployment mode mode: # on-demand or provisioned @@ -145,3 +145,15 @@ agents: - id: agent_fee72e13a3fdc7f0783abd65220d352d name: test-suite description: Tests the functionality of all Kitchen Sink example agents + - id: agent_52f73139a881ee0e1cdfafb3c6404e70 + name: example-telegram + description: Demonstrates how to integrate with a Telegram bot + - id: agent_c38f9a6d8d3edc6bde56047cbfd16c6f + name: example-streaming + description: Demonstrates advanced agent streaming patterns + - id: agent_4268cac212e32dae1f6a7c394d2c6b9d + name: example-llm-judge + description: Demonstrates LLM-as-a-judge pattern for evaluating AI outputs + - id: agent_ebc87cc45db854eff103e3d54cefa24a + name: example-teams + description: Demonstrates how to integrate with a Microsoft Teams bot diff --git a/bun.lock b/bun.lock index a6b2fc8..fe9117f 100644 --- a/bun.lock +++ b/bun.lock @@ -4,9 +4,10 @@ "": { "name": "kitchen-sink-ts", "dependencies": { - "@agentuity/sdk": "^0.0.144", + "@agentuity/sdk": "^0.0.157", "@ai-sdk/anthropic": "^2.0.17", 
"@ai-sdk/google": "^2.0.11", + "@ai-sdk/groq": "^2.0.24", "@ai-sdk/openai": "^2.0.23", "@composio/core": "^0.1.52", "@composio/vercel": "^0.2.8", @@ -14,8 +15,10 @@ "@slack/types": "^2.16.0", "@slack/web-api": "^7.10.0", "ai": "^5.0.29", + "botbuilder": "^4.23.3", "crypto": "^1.0.1", "source-map-js": "^1.2.1", + "zod": "^4.1.12", }, "devDependencies": { "@biomejs/biome": "2.2.2", @@ -29,7 +32,7 @@ "packages": { "@a2a-js/sdk": ["@a2a-js/sdk@0.2.5", "", { "dependencies": { "@types/cors": "^2.8.17", "@types/express": "^4.17.23", "body-parser": "^2.2.0", "cors": "^2.8.5", "express": "^4.21.2", "uuid": "^11.1.0" } }, "sha512-VTDuRS5V0ATbJ/LkaQlisMnTAeYKXAK6scMguVBstf+KIBQ7HIuKhiXLv+G/hvejkV+THoXzoNifInAkU81P1g=="], - "@agentuity/sdk": ["@agentuity/sdk@0.0.144", "", { "dependencies": { "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.57.2", "@opentelemetry/auto-instrumentations-node": "^0.56.1", "@opentelemetry/core": "^1.21.0", "@opentelemetry/exporter-logs-otlp-http": "^0.57.2", "@opentelemetry/exporter-metrics-otlp-http": "^0.57.2", "@opentelemetry/exporter-trace-otlp-http": "^0.57.2", "@opentelemetry/host-metrics": "^0.35.5", "@opentelemetry/resources": "^1.30.1", "@opentelemetry/sdk-logs": "^0.57.2", "@opentelemetry/sdk-metrics": "^1.30.1", "@opentelemetry/sdk-node": "^0.57.2", "@opentelemetry/semantic-conventions": "^1.30.0", "@traceloop/node-server-sdk": "^0.18.1", "js-yaml": "^4.1.0", "mailparser": "^3.7.4", "nodemailer": "^7.0.3" }, "peerDependencies": { "botbuilder": "^4.23.2", "typescript": "^5" } }, "sha512-paVf0N5bQt29UgWgzgdm0/FX2W+XGJbLtoHIhCdvsvB1phxvQtHXkr2GskKOnPn4h53wjGDScHRl7NcqR6kulQ=="], + "@agentuity/sdk": ["@agentuity/sdk@0.0.157", "", { "dependencies": { "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.57.2", "@opentelemetry/auto-instrumentations-node": "^0.56.1", "@opentelemetry/core": "^1.21.0", "@opentelemetry/exporter-logs-otlp-http": "^0.57.2", "@opentelemetry/exporter-metrics-otlp-http": "^0.57.2", 
"@opentelemetry/exporter-trace-otlp-http": "^0.57.2", "@opentelemetry/host-metrics": "^0.35.5", "@opentelemetry/resources": "^1.30.1", "@opentelemetry/sdk-logs": "^0.57.2", "@opentelemetry/sdk-metrics": "^1.30.1", "@opentelemetry/sdk-node": "^0.57.2", "@opentelemetry/semantic-conventions": "^1.30.0", "@traceloop/node-server-sdk": "^0.18.1", "js-yaml": "^4.1.0", "mailparser": "^3.7.4", "nodemailer": "^7.0.3" }, "peerDependencies": { "botbuilder": "^4.23.2", "typescript": "^5" } }, "sha512-xG4gFEDobY9+oyr5WP5sNBBGsm4urs7USkAn9a64qPb1td5UkBMsp3NqBGkYzgyHKiFG9uNxMCma21Np3IfEdA=="], "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.9" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-WPlhSuIiTU0KRESMyhLwCTfrtgD0GsI6px4Q7hqidTvyfDhPDeXMB0q1WShzJPNRTrjpfWRDLuYdLEL/y/+mLQ=="], @@ -37,6 +40,8 @@ "@ai-sdk/google": ["@ai-sdk/google@2.0.16", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.9" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-VN9QO1syIyKxYfAiS5QPZkw0caV6n/mmyvJui3T+lkhGPS2RXTv7nDrtRv8TroNzdPcPWoaZMgJRJD2W7oEZ+g=="], + "@ai-sdk/groq": ["@ai-sdk/groq@2.0.24", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.12" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-PCtNwFsakxR6B/o+l3gtxlPIwN8lawK3vvOjRdC759Y8WtNxCv5RUs0JsxIKyAZxO+RBEy0AoL8xTQUy8fn3gw=="], + "@ai-sdk/openai": ["@ai-sdk/openai@2.0.23", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.7" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-uOXk8HzmMUoCmD0JMX/Y1HC/ABOR/Jza2Z2rkCaJISDYz3fp5pnb6eNjcPRL48JSMzRAGp9UP5p0OpxS06IJZg=="], "@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="], @@ -1031,7 +1036,7 @@ "yargs-parser": 
["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], - "zod": ["zod@4.1.11", "", {}, "sha512-WPsqwxITS2tzx1bzhIKsEs19ABD5vmCVa4xBo2tq/SrV4RNZtfws1EnCWQXM6yh8bD08a1idvkB5MZSBiZsjwg=="], + "zod": ["zod@4.1.12", "", {}, "sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ=="], "zod-from-json-schema": ["zod-from-json-schema@0.5.0", "", { "dependencies": { "zod": "^4.0.17" } }, "sha512-W1v1YIoimOJfvuorGGp1QroizLL3jEGELJtgrHiVg/ytxVZdh/BTTVyPypGB7YK30LHrCkkebbjuyHIjBGCEzw=="], @@ -1041,6 +1046,8 @@ "@ai-sdk/gateway/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.7", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-o3BS5/t8KnBL3ubP8k3w77AByOypLm+pkIL/DCw0qKkhDbvhCy+L3hRTGPikpdb8WHcylAeKsjgwOxhj4cqTUA=="], + "@ai-sdk/groq/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.12", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg=="], + "@ai-sdk/openai/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.7", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4" } }, "sha512-o3BS5/t8KnBL3ubP8k3w77AByOypLm+pkIL/DCw0qKkhDbvhCy+L3hRTGPikpdb8WHcylAeKsjgwOxhj4cqTUA=="], "@ai-sdk/react/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@2.2.8", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, "peerDependencies": { "zod": "^3.23.8" } }, "sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA=="], @@ 
-1049,8 +1056,6 @@ "@ai-sdk/ui-utils/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@2.2.8", "", { "dependencies": { "@ai-sdk/provider": "1.1.3", "nanoid": "^3.3.8", "secure-json-parse": "^2.7.0" }, "peerDependencies": { "zod": "^3.23.8" } }, "sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA=="], - "@ai-sdk/ui-utils/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - "@azure/identity/@azure/msal-node": ["@azure/msal-node@3.7.3", "", { "dependencies": { "@azure/msal-common": "15.12.0", "jsonwebtoken": "^9.0.0", "uuid": "^8.3.0" } }, "sha512-MoJxkKM/YpChfq4g2o36tElyzNUMG8mfD6u8NbuaPAsqfGpaw249khAcJYNoIOigUzRw45OjXCOrexE6ImdUxg=="], "@azure/msal-browser/@azure/msal-common": ["@azure/msal-common@15.12.0", "", {}, "sha512-4ucXbjVw8KJ5QBgnGJUeA07c8iznwlk5ioHIhI4ASXcXgcf2yRFhWzYOyWg/cI49LC9ekpFJeQtO3zjDTbl6TQ=="], @@ -1301,20 +1306,16 @@ "type-is/mime-types": ["mime-types@3.0.1", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA=="], - "zod-from-json-schema-v3/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], + "zod-from-json-schema/zod": ["zod@4.1.11", "", {}, "sha512-WPsqwxITS2tzx1bzhIKsEs19ABD5vmCVa4xBo2tq/SrV4RNZtfws1EnCWQXM6yh8bD08a1idvkB5MZSBiZsjwg=="], - "zod-to-json-schema/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], + "zod-from-json-schema-v3/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], "@ai-sdk/react/@ai-sdk/provider-utils/@ai-sdk/provider": ["@ai-sdk/provider@1.1.3", "", { "dependencies": { "json-schema": "^0.4.0" } }, 
"sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg=="], - "@ai-sdk/react/@ai-sdk/provider-utils/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - "@azure/identity/@azure/msal-node/@azure/msal-common": ["@azure/msal-common@15.12.0", "", {}, "sha512-4ucXbjVw8KJ5QBgnGJUeA07c8iznwlk5ioHIhI4ASXcXgcf2yRFhWzYOyWg/cI49LC9ekpFJeQtO3zjDTbl6TQ=="], "@azure/identity/@azure/msal-node/uuid": ["uuid@8.3.2", "", { "bin": { "uuid": "dist/bin/uuid" } }, "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="], - "@mastra/core/@ai-sdk/provider-utils/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - "@mastra/core/@opentelemetry/auto-instrumentations-node/@opentelemetry/instrumentation": ["@opentelemetry/instrumentation@0.203.0", "", { "dependencies": { "@opentelemetry/api-logs": "0.203.0", "import-in-the-middle": "^1.8.1", "require-in-the-middle": "^7.1.1" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-ke1qyM+3AK2zPuBPb6Hk/GCsc5ewbLvPNkEuELx/JmANeEp6ZjnZ+wypPAJSucTw0wvCGrUaibDSdcrGFoWxKQ=="], "@mastra/core/@opentelemetry/auto-instrumentations-node/@opentelemetry/instrumentation-amqplib": ["@opentelemetry/instrumentation-amqplib@0.50.0", "", { "dependencies": { "@opentelemetry/core": "^2.0.0", "@opentelemetry/instrumentation": "^0.203.0", "@opentelemetry/semantic-conventions": "^1.27.0" }, "peerDependencies": { "@opentelemetry/api": "^1.3.0" } }, "sha512-kwNs/itehHG/qaQBcVrLNcvXVPW0I4FCOVtw3LHMLdYIqD7GJ6Yv2nX+a4YHjzbzIeRYj8iyMp0Bl7tlkidq5w=="], @@ -1427,8 +1428,6 @@ "@mastra/core/@opentelemetry/sdk-node/@opentelemetry/sdk-logs": ["@opentelemetry/sdk-logs@0.203.0", "", { "dependencies": { "@opentelemetry/api-logs": "0.203.0", "@opentelemetry/core": "2.0.1", "@opentelemetry/resources": "2.0.1" }, "peerDependencies": { 
"@opentelemetry/api": ">=1.4.0 <1.10.0" } }, "sha512-vM2+rPq0Vi3nYA5akQD2f3QwossDnTDLvKbea6u/A2NZ3XDkPxMfo/PNrDoXhDUD/0pPo2CdH5ce/thn9K0kLw=="], - "@mastra/core/ai/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - "@opentelemetry/exporter-logs-otlp-grpc/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base": ["@opentelemetry/sdk-trace-base@1.30.1", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/resources": "1.30.1", "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg=="], "@opentelemetry/exporter-logs-otlp-http/@opentelemetry/otlp-transformer/@opentelemetry/sdk-trace-base": ["@opentelemetry/sdk-trace-base@1.30.1", "", { "dependencies": { "@opentelemetry/core": "1.30.1", "@opentelemetry/resources": "1.30.1", "@opentelemetry/semantic-conventions": "1.28.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg=="], diff --git a/package.json b/package.json index d8d0f28..6fb8499 100644 --- a/package.json +++ b/package.json @@ -28,9 +28,10 @@ "typescript": "^5" }, "dependencies": { - "@agentuity/sdk": "^0.0.146", + "@agentuity/sdk": "^0.0.157", "@ai-sdk/anthropic": "^2.0.17", "@ai-sdk/google": "^2.0.11", + "@ai-sdk/groq": "^2.0.24", "@ai-sdk/openai": "^2.0.23", "@composio/core": "^0.1.52", "@composio/vercel": "^0.2.8", @@ -38,8 +39,10 @@ "@slack/types": "^2.16.0", "@slack/web-api": "^7.10.0", "ai": "^5.0.29", + "botbuilder": "^4.23.3", "crypto": "^1.0.1", - "source-map-js": "^1.2.1" + "source-map-js": "^1.2.1", + "zod": "^4.1.12" }, "module": "index.ts" -} \ No newline at end of file +} diff --git a/src/agents/example-composio/README.md b/src/agents/example-composio/README.md index 
d0b938a..837a7d1 100644 --- a/src/agents/example-composio/README.md +++ b/src/agents/example-composio/README.md @@ -41,7 +41,7 @@ This example uses the HackerNews toolkit, but Composio offers many more, includi ![Composio project settings showing API key](/.github/example-composio/composio-setup-2-api-key.png) Copy the API key and add it to your `.env` file: - ``` + ```env COMPOSIO_API_KEY=your-api-key-here ``` diff --git a/src/agents/example-discord/README.md b/src/agents/example-discord/README.md index 5e3f241..3c42d23 100644 --- a/src/agents/example-discord/README.md +++ b/src/agents/example-discord/README.md @@ -26,7 +26,7 @@ 2. **Configure Environment Variable** For local development, add to your `.env` file: - ``` + ```env DISCORD_WEBHOOK_URL=https://discord.com/api/webhooks/YOUR_WEBHOOK_ID/YOUR_WEBHOOK_TOKEN ``` diff --git a/src/agents/example-llm-judge/index.ts b/src/agents/example-llm-judge/index.ts new file mode 100644 index 0000000..e0559c4 --- /dev/null +++ b/src/agents/example-llm-judge/index.ts @@ -0,0 +1,82 @@ +import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; +import { openai } from '@ai-sdk/openai'; +import { generateObject } from 'ai'; +import { evaluationSchema, formatReport } from './story-eval'; + +export default async function Agent( + req: AgentRequest, + resp: AgentResponse, + ctx: AgentContext +) { + try { + // Get the prompt from request, or use a default + const prompt = + (await req.data.text()) || + 'Write a short story about an AI learning to paint'; + + ctx.logger.info('Starting LLM-as-a-judge evaluation'); + + // Get stories from gateway-provider + const gatewayAgent = await ctx.getAgent({ name: 'gateway-provider' }); + const stories = await gatewayAgent.run({ data: prompt }); + const storiesText = await stories.data.text(); + + ctx.logger.debug('Received stories from gateway-provider'); + + // Create evaluation prompt + const evaluationPrompt = ` +You are evaluating two AI-generated short stories. 
+ +Here are the stories: + +${storiesText} + +Extract each story text: +- OpenAI story: appears after "### OpenAI (GPT-5 Nano)" +- Google story: appears after "### Google (Gemini 2.0 Flash)" + +For each story, provide: +1. Creativity score (1-10): How original and imaginative is it? +2. Quality score (1-10): Overall writing quality +3. Strengths: What works well (1-2 sentences) + +Finally, provide a verdict declaring which story is better and why (2-3 sentences).`; + + ctx.logger.info('Generating structured evaluation'); + + // Generate structured evaluation + const evaluation = await generateObject({ + model: openai('gpt-5-nano'), + schema: evaluationSchema, + system: + 'You are a literary critic evaluating short AI-generated stories.', + prompt: evaluationPrompt, + }); + + // Log key metrics + ctx.logger.debug('Evaluation scores', { + openai: evaluation.object.openai.quality, + google: evaluation.object.google.quality, + }); + + // Return formatted report + return resp.markdown(formatReport(evaluation.object)); + } catch (error) { + ctx.logger.error('Error in LLM judge evaluation:', error); + return resp.text( + 'Sorry, there was an error running the evaluation. Please ensure the gateway-provider agent is available.' + ); + } +} + +export const welcome = () => { + return { + welcome: `Welcome to the LLM-as-a-Judge example agent.\n\n### About\n\nThis agent demonstrates the LLM-as-a-judge pattern, where one AI model evaluates the outputs of other models. It generates content using the gateway-provider agent (which uses two different AI models), then evaluates both outputs with structured scoring and feedback.\n\n### Testing\n\nTry the default prompt about AI learning to paint, or send your own story prompt. 
The agent calls \`gateway-provider\` to generate two stories (one from each AI model), then provides an evaluation comparing their creativity, quality, and strengths.\n\n### Questions?\n\nThe "Help" command is not available for this agent, as it's a specific example demonstration.`, + prompts: [ + { + data: 'Write a short story about an AI learning to paint', + contentType: 'text/plain', + }, + ], + }; +}; diff --git a/src/agents/example-llm-judge/story-eval.ts b/src/agents/example-llm-judge/story-eval.ts new file mode 100644 index 0000000..b0c2ea1 --- /dev/null +++ b/src/agents/example-llm-judge/story-eval.ts @@ -0,0 +1,56 @@ +import { z } from 'zod'; + +// Story evaluation structure +const storySchema = z.object({ + text: z.string().describe('The story content'), + creativity: z + .number() + .min(1) + .max(10) + .describe('How original and imaginative'), + quality: z.number().min(1).max(10).describe('Overall writing quality'), + strengths: z.string().describe('What works well (1-2 sentences)'), +}); + +// Complete evaluation schema +export const evaluationSchema = z.object({ + openai: storySchema, + google: storySchema, + verdict: z.string().describe('Which story is better and why (2-3 sentences)'), +}); + +export type Evaluation = z.infer; + +// Format evaluation as markdown report +export function formatReport(evaluation: Evaluation): string { + return `## 📚 Story Evaluation Report + +### Generated Stories + +#### OpenAI (GPT-5 Nano) +${evaluation.openai.text} + +--- + +#### Google (Gemini 2.0 Flash) +${evaluation.google.text} + +--- + +### 📊 Evaluation Results + +#### OpenAI Story +- **Creativity:** ${evaluation.openai.creativity}/10 +- **Quality:** ${evaluation.openai.quality}/10 +- **Strengths:** ${evaluation.openai.strengths} + +#### Google Story +- **Creativity:** ${evaluation.google.creativity}/10 +- **Quality:** ${evaluation.google.quality}/10 +- **Strengths:** ${evaluation.google.strengths} + +--- + +### 🏆 Verdict +${evaluation.verdict}`; +} diff --git 
a/src/agents/example-slack/.slack.manifest b/src/agents/example-slack/.slack.manifest index c11e13a..dd917a1 100644 --- a/src/agents/example-slack/.slack.manifest +++ b/src/agents/example-slack/.slack.manifest @@ -21,8 +21,7 @@ "event_subscriptions": { "request_url": "{AGENT IO API ENDPOINT URL}", "bot_events": [ - "app_mention", - "message.channels" + "app_mention" ] }, "org_deploy_enabled": false, diff --git a/src/agents/example-slack/index.ts b/src/agents/example-slack/index.ts index 47e7442..1bbca18 100644 --- a/src/agents/example-slack/index.ts +++ b/src/agents/example-slack/index.ts @@ -1,13 +1,13 @@ import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; -import { openai } from '@ai-sdk/openai'; -import type { GenericMessageEvent } from '@slack/types'; +import { groq } from '@ai-sdk/groq'; +import type { AppMentionEvent, GenericMessageEvent } from '@slack/types'; import { WebClient } from '@slack/web-api'; import { type AssistantModelMessage, generateText, type UserModelMessage, } from 'ai'; -import { handleError } from '../../lib/utils'; +import { handleError, handleSuccess } from '../../lib/utils'; import type { SlackAgentRequest } from './slack'; import { verifySlackWebhook } from './slack'; @@ -25,35 +25,53 @@ export default async function Agent( resp: AgentResponse, ctx: AgentContext ) { - // No manual trigger handling - if (req.trigger === 'manual') { - return resp.text('This agent only responds to Slack triggers.'); - } - - // Verify Slack webhook and handle challenge/validation before processing the request - const verificationResponse = await verifySlackWebhook( - req as SlackAgentRequest, - resp, - ctx - ); - - if (verificationResponse) { - return verificationResponse; - } - - const slack = new WebClient(process.env.SLACK_BOT_TOKEN); - const { event } = await req.data.object<{ event: GenericMessageEvent }>(); - const threadTs = event.thread_ts ?? 
event.ts; // Acts as the UUID for the conversation - - // Slack wants a fast 200 OK response, so we return that immediately - const response = resp.text('OK'); - try { - (async () => { - // Build conversation history from Slack thread - const messages: Message[] = []; - + // Ignore Slack retry attempts to prevent duplicate processing + const retryNum = (req as SlackAgentRequest).metadata.headers[ + 'x-slack-retry-num' + ]; + if (retryNum) { + ctx.logger.info('Ignoring Slack retry attempt', { + retryNum, + retryReason: (req as SlackAgentRequest).metadata.headers[ + 'x-slack-retry-reason' + ], + }); + return resp.text('OK'); + } + + // No manual trigger handling + if (req.trigger === 'manual') { + return resp.text('This agent only responds to Slack triggers.'); + } + + // Verify Slack webhook and handle challenge/validation before processing the request + const verificationResponse = await verifySlackWebhook( + req as SlackAgentRequest, + resp, + ctx + ); + + if (verificationResponse) { + return verificationResponse; + } + + const { event } = await req.data.object<{ + event: GenericMessageEvent | AppMentionEvent; + }>(); + const threadTs = event.thread_ts ?? 
event.ts; // Acts as the UUID for the conversation + + // Slack wants a fast 200 OK response, so we return that immediately + const response = resp.text('OK'); + + // Use waitUntil to process message in the background + ctx.waitUntil(async () => { try { + const slack = new WebClient(process.env.SLACK_BOT_TOKEN); + + // Build conversation history from Slack thread + const messages: Message[] = []; + // Determine if this is a thread reply or a new message if (threadTs !== event.ts) { // This is a thread reply, get all thread messages @@ -79,41 +97,39 @@ export default async function Agent( content: event.text || '', }); } - } catch (error) { - ctx.logger.warn('Failed to generate conversation:', error); + // Generate a reply + const result = await generateText({ + // Groq is ideal for fast responses + model: groq('openai/gpt-oss-20b'), + system: `You are a helpful Slack bot assistant that can have a conversation with the user. Try to limit your response length.`, + messages, + }); + + // Post reply to Slack thread + await slack.chat.postMessage({ + channel: event.channel, + thread_ts: threadTs, + text: result.text, + }); + + // Ping Checkly on success + await handleSuccess( + ctx, + 'example-slack', + process.env.CHECKLY_EXAMPLE_SLACK_URL + ); + } catch (error) { + ctx.logger.error('Error processing Slack message:', error); handleError('example-slack'); // Used for Kitchen Sink testing purposes - - return new Response('Internal Server Error', { status: 500 }); + // Error contained in background task - let it complete gracefully without throwing } - - // Generate a reply - const result = await generateText({ - model: openai('gpt-5-mini'), - system: `You are a helpful Slack bot assistant that can have a conversation with the user. 
Try to limit your response length.`, - messages, - }); - - // Post reply to Slack thread - await slack.chat.postMessage({ - channel: event.channel, - thread_ts: threadTs, - text: result.text, - }); - })().catch((error) => { - ctx.logger.error('Error processing Slack message:', error); - - handleError('example-slack'); // Used for Kitchen Sink testing purposes - - return new Response('Internal Server Error', { status: 500 }); }); return response; } catch (error) { ctx.logger.error('Error running agent:', error); - handleError('example-slack'); // Used for Kitchen Sink testing purposes - return new Response('Internal Server Error', { status: 500 }); } } diff --git a/src/agents/example-slack/slack.ts b/src/agents/example-slack/slack.ts index 8da7112..a301b4c 100644 --- a/src/agents/example-slack/slack.ts +++ b/src/agents/example-slack/slack.ts @@ -4,7 +4,7 @@ import type { AgentResponse, AgentResponseData, } from '@agentuity/sdk'; -import type { GenericMessageEvent } from '@slack/types'; +import type { AppMentionEvent, GenericMessageEvent } from '@slack/types'; import crypto from 'crypto'; // Agentuity request morphs the request data, so we need to type it correctly @@ -13,6 +13,8 @@ export type SlackAgentRequest = AgentRequest & { headers: { 'x-slack-signature'?: string; 'x-slack-request-timestamp'?: string; + 'x-slack-retry-num'?: string; + 'x-slack-retry-reason'?: string; [key: string]: string | undefined; }; }; @@ -70,7 +72,7 @@ export async function verifySlackWebhook( const eventData = body as { type?: string; challenge?: string; - event?: GenericMessageEvent; + event?: GenericMessageEvent | AppMentionEvent; }; // Handle Slack challenge verification @@ -93,18 +95,18 @@ export async function verifySlackWebhook( // biome-ignore lint/suspicious/noExplicitAny: Slack type def is missing user_id const botProfile = eventData.event.bot_profile as any; - if (botProfile.user_id && botProfile.user_id === eventData.event.user) { + if (botProfile?.user_id && 
botProfile.user_id === eventData.event.user) { ctx.logger.debug('Message from the agent itself'); return resp.empty(); } - // Check if the event is a message as opposed to something like a reaction - if ( - eventData.event.type !== 'message' || - typeof eventData.event.subtype !== 'undefined' - ) { - ctx.logger.debug('Not a message'); + // Check if the event is an app_mention + if (eventData.event.type !== 'app_mention') { + ctx.logger.info('Not an app_mention event, ignoring', { + eventType: eventData.event.type, + eventId: eventData.event.event_ts, + }); return resp.empty(); } diff --git a/src/agents/example-streaming/index.ts b/src/agents/example-streaming/index.ts new file mode 100644 index 0000000..b938220 --- /dev/null +++ b/src/agents/example-streaming/index.ts @@ -0,0 +1,288 @@ +import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; +import { openai } from '@ai-sdk/openai'; +import { streamText } from 'ai'; +import { handleHelpMessage } from '../../lib/utils'; + +export default async function Agent( + req: AgentRequest, + resp: AgentResponse, + ctx: AgentContext +) { + /*************** + * Boilerplate * + ***************/ + + const help = await handleHelpMessage(req, resp, ctx, 'agent streaming'); + + if (help) { + return help; + } + + /************ + * Examples * + ************/ + + const prompt = await req.data.text(); + + // Agent-to-agent streaming with resp.stream() + if (prompt === 'Agent Chain') { + try { + ctx.logger.info('Initiating agent chain: calling example-chat'); + + // Get reference to another agent in the project + const chatAgent = await ctx.getAgent({ name: 'example-chat' }); + + ctx.logger.debug( + 'Requesting "streaming benefits" explanation from example-chat agent' + ); + + // Call the agent with a custom prompt about streaming + const response = await chatAgent.run({ + data: 'Write a response with the title "## Benefits of Real-Time Streaming" followed by 3 key benefits of streaming data in real-time 
applications. Be concise but helpful.', + }); + + ctx.logger.info( + 'Received response from example-chat, starting stream forwarding' + ); + + // Get and forward the stream from the agent's response + const responseStream = await response.data.stream(); + + ctx.logger.debug('Stream forwarding initiated successfully'); + + // Forward the stream directly to the client using resp.stream() + return resp.stream(responseStream, 'text/markdown'); + } catch (error) { + ctx.logger.error('Error in agent chain streaming:', error); + return resp.text( + 'Sorry, there was an error with agent chaining. Please ensure the example-chat agent is available.' + ); + } + } + + // LLM streaming with pipeTo + if (prompt === 'LLM Streaming') { + try { + ctx.logger.info('Starting LLM streaming with company overview'); + + // Fetch Agentuity company information + const response = await fetch('https://agentuity.com/llms.txt'); + const companyInfo = await response.text(); + + // Create a stream with metadata + const stream = await ctx.stream.create('llm-summary', { + contentType: 'text/markdown', + metadata: { + type: 'llm-generation', + model: 'gpt-5-nano', + requestId: ctx.sessionId, + }, + }); + + ctx.logger.info('Stream created', { + streamId: stream.id, + streamUrl: stream.url, + }); + + // Use waitUntil to handle streaming in the background + ctx.waitUntil(async () => { + try { + const result = streamText({ + model: openai('gpt-5-nano'), + system: + 'You are a technical writer creating executive summaries. Write in a professional, detailed style.', + prompt: `Based on this company information, write a detailed executive summary (4-5 paragraphs) covering:\n1. Overview and core value proposition\n2. Key features and capabilities\n3. Benefits for developers and teams\n4. Target users and use cases\n5. 
Unique advantages\n\nCompany Information:\n${companyInfo}`, + }); + + // Pipe the text stream to our created stream + await result.textStream.pipeTo(stream); + + ctx.logger.info('LLM streaming completed'); + } catch (error) { + ctx.logger.error('Error in LLM streaming:', error); + // Note: pipeTo automatically closes/aborts the stream, no need to close manually + } + }); + + /** + * Return stream info immediately (non-blocking) + * + * Note: You could also return the stream directly with `return stream` + * This would automatically redirect the client to the stream URL + * instead of returning JSON with the stream info + */ + return resp.json({ + streamId: stream.id, + streamUrl: stream.url, + status: 'streaming', + message: + 'Generating executive summary from company overview. Stream URL will show AI response as it generates.', + }); + } catch (error) { + ctx.logger.error('Error in LLM streaming:', error); + return resp.text('Sorry, there was an error processing your request.'); + } + } + + // Low-level (manual) streaming: stream.write() with progress tracking + if (prompt === 'Manual Streaming') { + try { + ctx.logger.info('Starting batch processing example'); + + // Generate a batch of items to process + const batchSize = 100; + const items = Array.from({ length: batchSize }, (_, i) => ({ + id: `item-${String(i + 1).padStart(3, '0')}`, + timestamp: new Date().toISOString(), + status: 'pending', + data: { + value: Math.random() * 1000, + category: ['A', 'B', 'C', 'D'][i % 4], + priority: ['low', 'medium', 'high'][i % 3], + }, + })); + + // Create a demo stream to show stream deletion + const demoStream = await ctx.stream.create('batch-processing', { + contentType: 'application/json', + metadata: { + type: 'batch-processing', + purpose: 'demo', + startTime: new Date(Date.now() - 600000).toISOString(), // 10 minutes ago + }, + }); + + ctx.logger.info('Created demo stream for deletion'); + + // Write some sample data and close it + await demoStream.write( + 
JSON.stringify({ demo: true, message: 'This stream will be deleted' }) + + '\n' + ); + await demoStream.close(); + + // Create another stream + const stream = await ctx.stream.create('batch-processing', { + contentType: 'application/json', + // Optionally, enable automatic gzip compression: `compress: true,` + metadata: { + // Metadata: organize streams by purpose, timestamps (for TTL-based cleanup), user IDs, etc. + type: 'batch-processing', + purpose: 'production', + batchSize: String(batchSize), // Metadata values must be strings + startTime: new Date().toISOString(), + requestId: ctx.sessionId, + }, + }); + + ctx.logger.info('Stream created for batch processing', { + streamId: stream.id, + streamUrl: stream.url, + compressed: stream.compressed, + itemCount: batchSize, + }); + + // Use waitUntil for background processing + ctx.waitUntil(async () => { + try { + for (let i = 0; i < items.length; i++) { + const item = items[i]; + if (!item) continue; + + // Simulate processing + const processedItem = { + ...item, + status: 'completed', + processedAt: new Date().toISOString(), + }; + + // Use `stream.write()` directly + // The SDK handles writer management (acquisition and locking) automatically + await stream.write(`${JSON.stringify(processedItem)}\n`); + + // Simulate processing time (100-300ms per item, avg 200ms) + await new Promise((resolve) => + setTimeout(resolve, 100 + Math.random() * 200) + ); + + // Log progress periodically (every 10 items) + if ((i + 1) % 10 === 0) { + ctx.logger.info( + `Batch progress: ${i + 1}/${items.length} items, ${stream.bytesWritten} bytes written` + ); + } + } + + ctx.logger.info( + `Batch processing complete: ${items.length}/${items.length} items, ${stream.bytesWritten} bytes (uncompressed), compression: ${stream.compressed ? 
'enabled' : 'disabled'}` + ); + } finally { + // Always close the stream when done + await stream.close(); + } + }); + + // Demonstrate stream listing and deletion + const allStreams = await ctx.stream.list({ + name: 'batch-processing', + }); + + ctx.logger.info(`Found ${allStreams.total} batch-processing stream(s)`); + + // Find and delete the demo stream (client-side filtering) + // Alternatively, you could use server-side filtering: `metadata: { purpose: 'demo' }` + const demoStreams = allStreams.streams.filter( + (s) => s.metadata?.purpose === 'demo' + ); + + for (const demo of demoStreams) { + await ctx.stream.delete(demo.id); + ctx.logger.info( + `Deleted demo stream: ${demo.id} (${demo.sizeBytes} bytes)` + ); + } + + if (demoStreams.length > 0) { + ctx.logger.info( + `Cleanup complete: deleted ${demoStreams.length} demo stream(s)` + ); + } + + // Return stream info immediately (non-blocking) + return resp.json({ + streamId: stream.id, + streamUrl: stream.url, + status: 'processing', + compressed: stream.compressed, + itemCount: batchSize, + message: + 'Batch processing started. Demo stream created and deleted to showcase stream management. Access the stream URL to see processing results.', + }); + } catch (error) { + ctx.logger.error('Error in batch processing:', error); + return resp.text('Sorry, there was an error processing your request.'); + } + } + + return resp.text('You sent an invalid message.'); +} + +export const welcome = () => { + return { + welcome: `Welcome to the Streaming example agent.\n\n### About\n\nThis agent demonstrates real-time streaming patterns from high-level to low-level control. 
Learn agent chaining with \`resp.stream()\`, LLM integration with \`pipeTo()\`, and manual control with \`stream.write()\`.\n\nLearn more: [Streaming Guide](https://agentuity.dev/Guides/agent-streaming)\n\n### Testing\n\nChoose a streaming pattern below to see different approaches to real-time data delivery.\n\n### Questions?\n\nYou can type "help" at any time to learn more about streaming capabilities, or chat with our expert agent by selecting the kitchen-sink agent.`, + prompts: [ + { + data: 'Agent Chain', + contentType: 'text/plain', + }, + { + data: 'LLM Streaming', + contentType: 'text/plain', + }, + { + data: 'Manual Streaming', + contentType: 'text/plain', + }, + ], + }; +}; diff --git a/src/agents/example-teams/README.md b/src/agents/example-teams/README.md new file mode 100644 index 0000000..3c24ba3 --- /dev/null +++ b/src/agents/example-teams/README.md @@ -0,0 +1,532 @@ +# Microsoft Teams Bot Setup Guide + +Complete guide to building and deploying a Teams bot using Agentuity. + +## Overview + +This example demonstrates: +- **Serverless Teams bot** using CloudAdapter and `processActivityDirect` +- **AI conversations** with context retention (last 10 messages) +- **KV storage** for conversation history +- **Zod validation** for fail-fast error handling +- **Azure Bot Service integration** without running your own HTTP server + +**Related Example:** This agent uses many of the same patterns as the `example-chat` agent (KV storage, message history, AI context), but adapted for Microsoft Teams. 
+ +## Prerequisites + +- Microsoft Azure account +- Microsoft Teams account +- Agentuity account and CLI installed +- Basic understanding of Teams bots and Bot Framework + +## Architecture Overview + +This agent uses two files: + +- **`teams.ts`** - Platform-specific Teams configuration ([CloudAdapter](https://learn.microsoft.com/en-us/dotnet/api/microsoft.bot.builder.integration.aspnet.core.cloudadapter?view=botbuilder-dotnet-stable), authentication, schemas) +- **`index.ts`** - Agent implementation (request handling, AI logic, KV storage) + +**How this differs from Microsoft samples:** Most [Microsoft examples](https://github.com/microsoft/BotBuilder-Samples/blob/55d1f5372ee811e1fb8c0ccb15cc86340cc15679/samples/javascript_nodejs/80.skills-simple-bot-to-bot/echo-skill-bot/index.js) use `adapter.process(req, res, callback)` which expects Node.js HTTP objects and controls the response. Agentuity uses `adapter.processActivityDirect(authHeader, activity, callback)` instead - a serverless-friendly method that lets you control the response while still getting full Bot Framework features (JWT validation, TurnContext, ConnectorClient, etc.). + +## Part 1: Azure Bot Service Setup + +### 1.1 Create Azure Bot Resource + +1. Go to [Azure Portal](https://portal.azure.com) + +![Azure Portal home page with Create a resource button](/.github/example-teams/teams-setup-1-azure-home.png) + +2. Click **Create a resource** → **Categories** → **AI Apps and Agents** + +![Azure Portal showing AI Apps and Agents category](/.github/example-teams/teams-setup-2-categories.png) + +3. 
Find **Azure Bot** and click **Create** + +![Azure Bot resource creation page](/.github/example-teams/teams-setup-3-create-bot.png) + +**Configuration:** +- **Bot handle**: Choose unique name (e.g., `my-agentuity-bot`) +- **Subscription**: Select your subscription +- **Resource group**: Create new or use existing +- **Data residency**: Global (recommended) +- **Pricing tier**: Standard (default) - You can change to F0 (Free) after creation if needed +- **Type of App**: Single Tenant +- **Creation type**: Create new Microsoft App ID + +4. Click **Review + Create** → **Create** + +### 1.2 Get Bot Credentials + +After creation completes: + +1. Go to your bot resource +2. Navigate to **Configuration** (left sidebar) +3. Copy these values: + +``` +Microsoft App ID: +Microsoft App Tenant ID: +``` + +4. Click **Manage** next to "Microsoft App ID" +5. Go to **Certificates & secrets** → **Client secrets** → **New client secret** +6. Add description (e.g., "Agentuity bot secret") +7. Select expiration +8. Click **Add** +9. **Copy the secret value immediately** (you won't see it again) + +``` +Client secret value: +``` + +![Azure App Registration showing client secret configuration](/.github/example-teams/teams-setup-4-bot-credentials.png) + +### 1.3 Enable Teams Channel + +1. In your bot resource, go to **Channels** (left sidebar) +2. Click the **Microsoft Teams** icon under "Available channels" +3. Review the settings and keep the default (Messaging: Commercial) +4. Click **Apply** + +**Verification:** The Teams channel should now appear in "Connected channels" with "Healthy" status. 
+ +![Azure Bot channels page showing Microsoft Teams and Web Chat with Healthy status](/.github/example-teams/teams-setup-5-channels.png) + +![Microsoft Teams channel configuration showing messaging settings](/.github/example-teams/teams-setup-6-channels-details.png) + +**Note:** Web Chat and Direct Line channels are enabled by default - you can use Web Chat for testing (see [Part 3.4](#34-test-your-bot-optional-but-recommended)). The messaging endpoint will be configured in [Part 3.3](#33-configure-messaging-endpoint) after deploying to Agentuity. + +## Part 2: Environment Variables Setup + +### 2.1 Create .env File + +In your project root, create/update `.env`: + +```bash +# Azure Bot Service credentials +TEAMS_BOT_APP_ID= +TEAMS_BOT_APP_PASSWORD= +TEAMS_BOT_TENANT_ID= +``` + +### 2.2 Configure in Agentuity + +Add environment variables to your Agentuity project: + +```bash +# Using Agentuity CLI +agentuity env set TEAMS_BOT_APP_ID "your-app-id" +agentuity env set TEAMS_BOT_APP_PASSWORD "your-password" +agentuity env set TEAMS_BOT_TENANT_ID "your-tenant-id" +``` + +Or view/set them in the Agentuity dashboard under your project's **Settings** tab. + +## Part 3: Deploy to Agentuity + +### 3.1 Deploy the Agent/Project + +```bash +# From project root +agentuity deploy +``` + +### 3.2 Get Webhook URL + +After deployment: + +1. Go to Agentuity dashboard +2. Navigate to your project → Agents → `example-teams` +3. Copy the webhook URL +4. Format: `https://api.agentuity.com/webhooks/agent/` + +### 3.3 Configure Messaging Endpoint + +Now that you have your webhook URL, configure it in Azure: + +1. **Configuration** → **Messaging endpoint** +2. Paste your Agentuity webhook URL +3. 
Click **Save** + +![Azure Bot Configuration page showing messaging endpoint field](/.github/example-teams/teams-setup-7-messaging-endpoint.png) + +**Verification:** +- Azure will send a test message to verify the endpoint +- Check Agentuity logs to confirm the bot received it +- If successful, you'll see "Teams bot request received" in logs + +### 3.4 Test Your Bot (Optional but Recommended) + +Before creating the Teams manifest, you can test your bot in Azure's Web Chat to verify everything works: + +1. In your Azure Bot resource, click **Test in Web Chat** (left sidebar) +2. The chat interface will open automatically +3. Send a message (e.g., "Hello") to test the bot + +**What to expect:** +- Welcome message appears automatically +- Bot responds to your messages with AI-generated replies +- Conversation context is maintained (try asking "What did I ask earlier?") + +![Azure Bot Test in Web Chat showing conversation with AI bot](/.github/example-teams/teams-setup-8-test-webchat.png) + +**Why test here first:** +- Verify your bot logic works before Teams setup +- Faster iteration (no manifest creation needed) +- Same Bot Framework integration as Teams +- Useful for debugging + +**Note:** Web Chat is automatically enabled for all Azure Bots. If you see errors, check: +- Agentuity webhook URL is correct in Configuration +- Environment variables are set in Agentuity +- Bot is deployed to Agentuity + +## Part 4: Create Teams App Manifest + +### 4.1 Prepare Icons + +This example includes placeholder icons in `src/agents/example-teams/appManifest/`. You can use these to get started quickly, or replace them with your own branding: + +**color.png:** +- The icon for the bot (shown in Teams app list, chat header, etc.)
+- Dimensions: 192x192 pixels +- Full color logo/icon +- File size: < 100KB recommended + +**outline.png:** +- Dimensions: 32x32 pixels +- **Transparent background** (Alpha=0 for background pixels) +- **White foreground only** (RGB: 255,255,255, Alpha=255) +- No anti-aliasing or semi-transparent pixels + +**To customize:** Replace the placeholder icons with your own branding, ensuring they meet the requirements above. + +**Note:** Teams is very strict about outline.png transparency. Ensure the background is fully transparent with only white foreground pixels. + +### 4.2 Configure manifest.json + +Edit `src/agents/example-teams/appManifest/manifest.json`: + +**Required changes:** +1. Replace `{MICROSOFT_APP_ID}` with your actual App ID (appears in 2 places: `id` and `bots[0].botId`) +2. Update `name.short` and `name.full` (optional) +3. Update `description.short` and `description.full` (optional) +4. Update `developer.name`, `developer.websiteUrl`, `developer.privacyUrl`, `developer.termsOfUseUrl` + +**Note:** Teams requires all URLs in the manifest to be valid HTTPS URLs. Use placeholder URLs (like `https://example.com/privacy`) during development if you don't have real URLs yet. You can update these later before publishing to your organization. + +### 4.3 Create Manifest Package + +```bash +cd src/agents/example-teams/appManifest +zip -r manifest.zip manifest.json color.png outline.png +``` + +Verify the zip contains exactly 3 files: +```bash +unzip -l manifest.zip +# Should show: +# manifest.json +# color.png +# outline.png +``` + +### 4.4 Validate Manifest (Recommended) + +Before installing in Teams, validate your manifest: + +1. Go to [Teams Developer Portal](https://dev.teams.microsoft.com/home) +2. Click **New app** → **Import app** + +![Teams Developer Portal import app dialog](/.github/example-teams/teams-setup-9-validate-manifest.png) + +3. Upload `manifest.zip` +4.
Check for validation errors - if successful, you'll see no errors and can proceed + +**Common errors:** +- **"Outline icon is not transparent"**: Fix outline.png (see [Part 4.1](#41-prepare-icons)) +- **"Invalid App ID"**: Ensure App ID matches Azure Bot registration +- **"Invalid URLs"**: Ensure all URLs are HTTPS and accessible + +If validation fails, fix the issues, re-create `manifest.zip`, and validate again. + +## Part 5: Install in Microsoft Teams + +### 5.1 Upload Custom App + +**Option A: Direct Upload (Easiest)** +1. Open Microsoft Teams (desktop app or [web version](https://teams.microsoft.com/v2/)) +2. Click **Apps** (left sidebar) +3. Click **Manage your apps** (bottom left) +4. Click **Upload an app** → **Upload a custom app** +5. Select `manifest.zip` +6. Click **Add** + +![Microsoft Teams upload custom app dialog](/.github/example-teams/teams-setup-11-teams-upload.png) + +**Option B: Via Developer Portal** +1. In Developer Portal, after importing app +2. Click **Publish** → **Publish to org** (requires admin approval) +3. Or click **Download** to get manifest.zip and use Option A + +### 5.2 Start Chatting + +1. Find your bot in Teams Apps +2. Click **Chat** to start 1:1 conversation +3. Bot should send welcome message: + ``` + Hello! I'm an AI assistant. Send me a message and I'll help you with anything you need! + ``` + +## Part 6: Testing + +### 6.1 Basic Functionality Tests + +| Test | Steps | Expected Result | +|------|-------|-----------------| +| **Welcome message** | Add bot to chat | Welcome message appears automatically | +| **AI conversation** | 1. Send: "What should I eat for dinner?"
2. Send: "Make it vegetarian" | Bot provides suggestions and remembers context | +| **Conversation history** | 1. Send several messages
2. Ask: "What did I ask earlier?" | Bot references previous conversation | + | **Context limits** | Send 10+ messages | Bot continues responding (keeps last 10 messages) | + +### 6.2 Check Agentuity Logs + +Monitor logs in Agentuity dashboard: + +``` +Teams bot request received +Processing activity { type: 'message', id: '...', from: 'John Doe' } +Bot logic executing { activityType: 'message' } +AI response sent { userId: '...', messageCount: 6 } +Request processed successfully +``` + +**Expected logs per message:** +1. "Teams bot request received" +2. "Processing activity" +3. "Bot logic executing" +4. "AI response sent" (with message count) +5. "Request processed successfully" + +### 6.3 Verify KV Storage + +In Agentuity dashboard, check Key-Value storage: + +**Bucket:** `teams-chats` +**Keys:** +- `chat-<userKey>` - Conversation history (24h TTL) +- `ref-<userKey>` - Conversation reference for proactive messaging (30d TTL) + +**Content:** Array of messages with role, content, timestamp + +![Agentuity Key-Value storage showing Teams chat history](/.github/example-teams/teams-setup-12-kv-storage.png) + +## Part 7: Proactive Messaging + +After chatting with the bot, you can send yourself (or others) messages proactively via webhook/curl - without initiating a conversation first. This is useful for: +- Cron-triggered notifications +- Event-driven alerts +- Scheduled reminders +- System announcements + +For more details on proactive messaging patterns, see Microsoft's [Send proactive messages guide](https://learn.microsoft.com/en-us/microsoftteams/platform/bots/how-to/conversations/send-proactive-messages?tabs=javascript). + +### 7.1 How It Works + +When you send your first message to the bot, the system automatically: +1. Stores a conversation reference in KV storage +2. Logs your `userKey` (short 16-character hash) +3. Keeps this reference for 30 days + +You can then use this `userKey` to send proactive messages via the same webhook URL.
+ +### 7.2 Getting Your User Key + +**Method 1: Check Logs (Easiest)** + +After chatting with the bot, check your Agentuity logs for: + +``` +Conversation reference stored { + userName: 'John Doe', + userKey: 'YOUR_USER_KEY', ← Copy this! + fullUserId: '29:1-...', + note: 'Use userKey in curl commands for proactive messaging' +} +``` + +**Method 2: Check KV Storage** + +1. Go to Agentuity dashboard → Key-Value Storage +2. Select bucket: `teams-chats` +3. Find key: `ref-<userKey>` (e.g., `ref-YOUR_USER_KEY`) +4. The suffix after `ref-` is your `userKey` +5. Open the value to see the full conversation reference data + +### 7.3 Send a Proactive Message + +**Step-by-step:** + +1. **Chat with the bot in Teams** + - Send any message (e.g., "Hello") + - Bot responds with AI-generated reply + +2. **Find your userKey** + - Check Agentuity logs → Look for "Conversation reference stored" + - Copy the `userKey` value + +3. **Send the proactive message** + ```bash + curl https://api.agentuity.com/webhooks/agent/<agent-id> \ + -X POST \ + -H 'Content-Type: application/json' \ + -d '{"userKey": "YOUR_USER_KEY", "text": "Testing proactive messaging!"}' + ``` + +4. **Verify in Teams** + - Message should appear in your chat with the bot + - No action required - it's truly proactive! + +**Parameters:** +- `userKey` (required): The 16-character hash from logs or KV storage +- `text` (required): The message to send + +**Success response:** +```json +{ + "success": true, + "message": "Proactive message sent", + "userKey": "YOUR_USER_KEY", + "userName": "John Doe" +} +``` + +**Common errors:** + +**Not found (404):** +```json +{ + "error": "Conversation reference not found", + "userKey": "YOUR_USER_KEY", + "hint": "Send a message to the bot in Teams to initialize the conversation reference, then retry."
+} +``` + +**Missing parameters (400):** +```json +{ + "error": "Both userKey and text are required", + "example": { + "userKey": "a1b2c3d4e5f6g7h8", + "text": "Your message here" + } +} +``` + +**Blocked (403):** +```json +{ + "error": "User has blocked or uninstalled the bot", + "details": "..." +} +``` + +### 7.4 Use Cases + +**Cron-triggered notifications:** + +Set up scheduled messages using Agentuity's cron feature (configured in the dashboard): +- Daily reminders (e.g., "9 AM standup reminder") +- Weekly summaries (e.g., "Friday EOD report") +- Custom schedules (e.g., "Every 2 hours during business hours") + +In your cron agent, use the proactive messaging pattern from [Part 7.3](#73-send-a-proactive-message) to send messages to specific people. + +**Event-driven alerts:** +```javascript +// When a system event occurs +await fetch('https://api.agentuity.com/webhooks/agent/', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + userKey: 'USER_KEY', + text: 'Alert: Your deployment completed successfully!' + }) +}); +``` + +## Part 8: Troubleshooting + +**Tip:** If you encounter errors that are difficult to diagnose, check your bot's health status in the Azure Portal. Navigate to your bot resource → **Settings → Channels** and review the **Health Status** column for your Teams channel (or Web Chat channel if testing there). Error details and logs will appear here when there are connection or configuration issues. In most cases, though, you can see helpful agent logs in the Agentuity Console under **Projects → Logs**. + +| Problem | Solution | +|---------|----------| +| **Bot not responding** | 1. Verify Azure messaging endpoint matches Agentuity webhook URL
2. Check environment variables are set correctly in Agentuity
3. Review Agentuity logs for errors (auth header, activity structure, credentials)
4. Confirm Teams channel is enabled in Azure Bot (should show "Running") | +| **Manifest upload fails: "Outline icon not transparent"** | Fix outline.png to use fully transparent background with white foreground only (see [Part 4.1](#41-prepare-icons)), re-create manifest.zip, and try again | +| **Manifest upload fails: "App ID mismatch"** | Ensure `manifest.json` fields `id` and `bots[0].botId` both exactly match your Azure Bot App ID (case-sensitive) | +| **Manifest upload fails: "Invalid URLs"** | Ensure all URLs in manifest are HTTPS and publicly accessible (or use placeholder URLs for testing) | +| **Bot works in Web Chat but not Teams** | Verify App ID in manifest.json exactly matches Azure Bot in both the `id` field and `bots[0].botId` field. Re-zip and re-upload if needed. | +| **Signing Key errors in logs** | Usually transient - retry the request. If persistent, verify Azure Bot credentials are correct and check network connectivity. | +| **Welcome message not showing** | Welcome message only appears when bot is first added to chat or new member joins. To trigger again: remove bot from Teams and re-add it. | + +## Next Steps + +### Broadcasting to Multiple People + +The proactive messaging example in [Part 7](#part-7-proactive-messaging) sends to one person. 
To broadcast announcements to everyone, iterate through stored conversation references: + +```typescript +// Get all stored conversation references +const refs = await ctx.kv.list('teams-chats'); + +for (const key of refs.keys) { + // Skip non-reference keys (chat history keys) + if (!key.startsWith('ref-')) continue; + + const refResult = await ctx.kv.get('teams-chats', key); + const data = await refResult.data.json() as ConversationReferenceData; + + try { + await adapter.continueConversationAsync( + process.env.TEAMS_BOT_APP_ID!, + data.reference, + async (context) => { + await context.sendActivity('Your announcement here'); + } + ); + } catch (error) { + ctx.logger.error('Failed to send to user', { key, error }); + } +} +``` + +**Note:** Implement rate limiting and error handling to avoid hitting Teams API limits. + +### Extend Functionality + +Ideas for enhancing your bot: +- Add slash commands (`/help`, `/reset`) +- Integrate with external APIs +- Add adaptive cards for rich responses +- Multi-user conversation support (group chats) +- Persistent user preferences + +## Resources + +### Microsoft Documentation +- [Microsoft Teams Bot Documentation](https://learn.microsoft.com/en-us/microsoftteams/platform/bots/what-are-bots) +- [Send Proactive Messages](https://learn.microsoft.com/en-us/microsoftteams/platform/bots/how-to/conversations/send-proactive-messages?tabs=javascript) +- [Teams App Manifest Schema](https://learn.microsoft.com/en-us/microsoftteams/platform/resources/schema/manifest-schema) +- [Azure Bot Service](https://azure.microsoft.com/en-us/products/ai-services/ai-bot-service) + +### Bot Framework SDK +- [Bot Framework SDK (GitHub)](https://github.com/microsoft/botbuilder-js) +- [CloudAdapter Class](https://learn.microsoft.com/en-us/dotnet/api/microsoft.bot.builder.integration.aspnet.core.cloudadapter?view=botbuilder-dotnet-stable) +- [BotFrameworkAuthentication 
Class](https://learn.microsoft.com/en-us/javascript/api/botframework-connector/botframeworkauthentication?view=botbuilder-ts-latest) + +### Sample Code +- [Teams Conversation Bot Sample (Node.js)](https://github.com/OfficeDev/Microsoft-Teams-Samples/tree/main/samples/bot-conversation/nodejs) - Simple conversation bot similar to this example + +### Agentuity +- [Agentuity Documentation](https://agentuity.dev/) diff --git a/src/agents/example-teams/appManifest/.gitignore b/src/agents/example-teams/appManifest/.gitignore new file mode 100644 index 0000000..e7b09cc --- /dev/null +++ b/src/agents/example-teams/appManifest/.gitignore @@ -0,0 +1,5 @@ +# Local testing manifest (keep your real App IDs here) +manifest.local.json + +# Packaged manifest zips +*.zip diff --git a/src/agents/example-teams/appManifest/color.png b/src/agents/example-teams/appManifest/color.png new file mode 100644 index 0000000..57baa69 Binary files /dev/null and b/src/agents/example-teams/appManifest/color.png differ diff --git a/src/agents/example-teams/appManifest/manifest.json b/src/agents/example-teams/appManifest/manifest.json new file mode 100644 index 0000000..95abeb8 --- /dev/null +++ b/src/agents/example-teams/appManifest/manifest.json @@ -0,0 +1,35 @@ +{ + "$schema": "https://developer.microsoft.com/en-us/json-schemas/teams/v1.17/MicrosoftTeams.schema.json", + "manifestVersion": "1.17", + "version": "1.0.0", + "id": "{MICROSOFT_APP_ID}", + "developer": { + "name": "Your Name or Company", + "websiteUrl": "https://example.com", + "privacyUrl": "https://example.com/privacy", + "termsOfUseUrl": "https://example.com/terms" + }, + "icons": { + "color": "color.png", + "outline": "outline.png" + }, + "name": { + "short": "Kitchen Sink Bot", + "full": "Kitchen Sink Teams Bot" + }, + "description": { + "short": "AI assistant bot built with Agentuity", + "full": "An AI-powered conversational assistant demonstrating Microsoft Teams integration with the Agentuity platform." 
+ }, + "accentColor": "#FFFFFF", + "bots": [ + { + "botId": "{MICROSOFT_APP_ID}", + "scopes": ["personal", "team", "groupchat"], + "supportsFiles": false, + "isNotificationOnly": false + } + ], + "permissions": ["identity", "messageTeamMembers"], + "validDomains": [] +} diff --git a/src/agents/example-teams/appManifest/outline.png b/src/agents/example-teams/appManifest/outline.png new file mode 100644 index 0000000..09f3f31 Binary files /dev/null and b/src/agents/example-teams/appManifest/outline.png differ diff --git a/src/agents/example-teams/index.ts b/src/agents/example-teams/index.ts new file mode 100644 index 0000000..58e9b09 --- /dev/null +++ b/src/agents/example-teams/index.ts @@ -0,0 +1,219 @@ +import type { AgentRequest, AgentResponse, AgentContext } from '@agentuity/sdk'; +import { TurnContext, type Activity } from 'botbuilder'; +import { openai } from '@ai-sdk/openai'; +import { generateText } from 'ai'; +import { + ActivitySchema, + adapter, + hashUserId, + handleProactiveMessage, + type MessageHistory, + type ConversationReferenceData, +} from './teams'; + +export default async function Agent( + req: AgentRequest, + resp: AgentResponse, + ctx: AgentContext +) { + try { + // Get the Authorization header + const headers = req.metadata.headers as Record | undefined; + const authHeader = headers?.authorization; + + // No auth header = proactive message request (curl/webhook/agent call) + // Has auth header = Teams activity (normal conversation) + if (!authHeader) { + return handleProactiveMessage(req, resp, ctx); + } + + // Validate the activity from the request body + const parseResult = ActivitySchema.safeParse(await req.data.json()); + if (!parseResult.success) { + ctx.logger.error('Invalid activity structure', { + errors: parseResult.error.issues, + }); + return resp.json( + { + error: 'Bad Request', + details: parseResult.error.issues, + }, + { status: 400 } + ); + } + + /* + * Zod provides early validation for fields the bot uses. 
+ * CloudAdapter performs complete Bot Framework validation afterward. + */ + const activity = parseResult.data as unknown as Activity; + // Without the Zod validation: activity = (await req.data.json()) as unknown as Activity + + ctx.logger.info('Processing Teams activity', { + type: activity.type, + from: activity.from?.name, + }); + + /* + * Process the Teams activity using CloudAdapter's serverless-friendly method. + * CloudAdapter handles: + * - JWT validation against Microsoft's signing keys + * - Bot Framework protocol compliance + * - TurnContext creation with ConnectorClient for sending responses + * + * This approach is designed for serverless platforms like Agentuity, + * unlike the traditional .process() method which requires Node HTTP objects. + */ + await adapter.processActivityDirect( + authHeader, + activity, + async (turnContext: TurnContext) => { + // Handle incoming messages to the Teams bot + if (turnContext.activity.type === 'message') { + const userId = turnContext.activity.from.id; + const userMessage = turnContext.activity.text; + + // Hash Teams user ID (~90 chars) to create shorter KV key (16 hex chars) + const userKey = hashUserId(userId); + + // 1. Load conversation history from KV + let messages: MessageHistory = []; + try { + const historyResult = await ctx.kv.get( + 'teams-chats', + `chat-${userKey}` + ); + if (historyResult.exists) { + messages = (await historyResult.data.json()) as MessageHistory; + } + } catch (error) { + ctx.logger.error('Error retrieving chat history:', error); + // Continue with empty history + } + + // 2. Add user message + messages.push({ + role: 'user', + content: userMessage, + timestamp: Date.now(), + }); + + // 3. Trim to last 10 messages + if (messages.length > 10) { + messages.splice(0, messages.length - 10); + } + + // 4. Generate AI response + try { + const result = await generateText({ + model: openai('gpt-5-nano'), + system: + 'You are a helpful Teams assistant. 
Keep responses concise and friendly.', + messages: messages.map((m) => ({ + role: m.role, + content: m.content, + })), + }); + + // 5. Send response to Teams + await turnContext.sendActivity(result.text); + + // 6. Add assistant response to (KV storage) history + messages.push({ + role: 'assistant', + content: result.text, + timestamp: Date.now(), + }); + + // 7. Save updated history to KV (24h TTL) + await ctx.kv.set('teams-chats', `chat-${userKey}`, messages, { + ttl: 86400, // 24 hours + }); + + // 8. Store conversation reference for proactive messaging + try { + const conversationRef = TurnContext.getConversationReference( + turnContext.activity + ); + + const refData: ConversationReferenceData = { + reference: conversationRef, + fullUserId: userId, + userKey: userKey, + userName: turnContext.activity.from.name || 'Unknown', + lastUpdated: Date.now(), + }; + + // Check if this is a new reference + const existingRef = await ctx.kv.get( + 'teams-chats', + `ref-${userKey}` + ); + const isNewUser = !existingRef.exists; + + await ctx.kv.set('teams-chats', `ref-${userKey}`, refData, { + ttl: 86400 * 30, // 30 days + }); + + // Only log for new users + if (isNewUser) { + ctx.logger.info('Conversation reference stored (new user)', { + userName: refData.userName, + userKey: userKey, + note: 'Use userKey in curl commands for proactive messaging', + }); + } + } catch (error) { + ctx.logger.error('Error storing conversation reference:', error); + // Non-critical - don't fail the request + } + + ctx.logger.info('AI response sent', { + messageCount: messages.length, + }); + } catch (error) { + ctx.logger.error('Error generating AI response:', error); + await turnContext.sendActivity( + 'Sorry, I encountered an error processing your message. Please try again.' + ); + } + } else if (turnContext.activity.type === 'conversationUpdate') { + // Set up a welcome message (first time the bot is added, new member joins, etc.) 
+ // This is also shown in the "Test in Web Chat" feature + const membersAdded = turnContext.activity.membersAdded || []; + for (const member of membersAdded) { + if (member.id !== turnContext.activity.recipient.id) { + await turnContext.sendActivity( + "Hello! I'm an AI assistant. Send me a message and I'll help you with anything you need!" + ); + ctx.logger.info('Welcome message sent', { + memberName: member.name || 'unknown', + memberId: member.id, + }); + } + } + } + } + ); + + return resp.text('', { status: 200 }); + } catch (error) { + const err = error as Error; + + ctx.logger.error('Teams bot error', { + errorName: err.name, + errorMessage: err.message, + errorStack: err.stack, + }); + + return resp.json( + { error: 'Internal server error', details: err.message }, + { status: 500 } + ); + } +} + +export const welcome = () => ({ + welcome: `Welcome to the Microsoft Teams example agent.\n\n### About\n\nThis agent demonstrates how to create a Microsoft Teams bot with AI-powered conversations. It uses many of the same patterns as the \`example-chat\` agent (KV storage, conversation history, context retention), but adapted for Microsoft Teams.\n\n### Testing\n\nTesting is not available in DevMode for this agent. Add your bot to Microsoft Teams to test. 
See the README for Azure setup instructions.\n\n### Questions?\n\nThe "Help" command is not available for this agent, as it's a platform-specific example.`, + prompts: [], +}); diff --git a/src/agents/example-teams/teams.ts b/src/agents/example-teams/teams.ts new file mode 100644 index 0000000..ce1d054 --- /dev/null +++ b/src/agents/example-teams/teams.ts @@ -0,0 +1,197 @@ +import { + CloudAdapter, + ConfigurationBotFrameworkAuthentication, + type TurnContext, + type ConversationReference, +} from 'botbuilder'; +import type { AgentRequest, AgentResponse, AgentContext } from '@agentuity/sdk'; +import { z } from 'zod'; +import crypto from 'crypto'; +import { handleSuccess } from '../../lib/utils'; + +// Conversation history type for KV storage +export type MessageHistory = Array<{ + role: 'user' | 'assistant'; + content: string; + timestamp: number; +}>; + +// Conversation reference storage for proactive messaging +export type ConversationReferenceData = { + reference: Partial; + fullUserId: string; // Full Teams user ID (29:1-...) + userKey: string; // Hashed user ID (16 chars) + userName: string; // User's display name + lastUpdated: number; // Timestamp +}; + +// Validate only the Activity fields that our bot logic actually uses. +// CloudAdapter performs its own validation later on. +export const ActivitySchema = z.looseObject({ + type: z.string(), + from: z.object({ + id: z.string(), + name: z.string().optional(), + }), + conversation: z.object({ + id: z.string(), + }), + recipient: z.object({ + id: z.string(), + }), + text: z.string().optional(), + membersAdded: z + .array( + z.object({ + id: z.string(), + name: z.string().optional(), + }) + ) + .optional(), +}); + +// Bot Framework authentication using Azure Bot Service credentials. +// This configures CloudAdapter to validate incoming requests from Microsoft Teams. 
+const auth = new ConfigurationBotFrameworkAuthentication({ + MicrosoftAppId: process.env.TEAMS_BOT_APP_ID, + MicrosoftAppPassword: process.env.TEAMS_BOT_APP_PASSWORD, + MicrosoftAppType: 'SingleTenant', + MicrosoftAppTenantId: process.env.TEAMS_BOT_TENANT_ID, +}); + +// CloudAdapter processes Teams activities and manages Bot Framework protocol +export const adapter = new CloudAdapter(auth); + +// Global error handler for Bot Framework errors. +// Sends a message to Teams while detailed errors are logged separately. +adapter.onTurnError = async (context: TurnContext, _error: Error) => { + await context.sendActivity('Sorry, something went wrong!'); +}; + +// Hash Teams user ID (~90 chars) to create shorter KV key (16 hex chars) +export function hashUserId(userId: string): string { + return crypto + .createHash('sha256') + .update(userId) + .digest('hex') + .substring(0, 16); +} + +/* + * Proactive message handler + * Triggered when request has no Teams Authorization header (curl/webhook). + * + * This example sends to a specific user. For broadcast functionality + * (e.g., announcements to all users), you can list all keys in the + * 'teams-chats' bucket using ctx.kv.list() and loop through them. + * + * Useful for: cron-triggered notifications, system-wide announcements, + * scheduled reminders. 
+ */ +export async function handleProactiveMessage( + req: AgentRequest, + resp: AgentResponse, + ctx: AgentContext +) { + try { + const { userKey, text } = (await req.data.json()) as { + userKey: string; + text: string; + }; + + // Validate input + if (!userKey || !text) { + ctx.logger.error('Missing required fields', { userKey, text }); + return resp.json( + { + error: 'Both userKey and text are required', + example: { userKey: 'a1b2c3d4e5f6g7h8', text: 'Your message here' }, + }, + { status: 400 } + ); + } + + // Load conversation reference + const refResult = await ctx.kv.get('teams-chats', `ref-${userKey}`); + + if (!refResult.exists) { + ctx.logger.warn('Conversation reference not found', { userKey }); + return resp.json( + { + error: 'Conversation reference not found', + userKey, + hint: 'Send a message to the bot in Teams to initialize/refresh the conversation reference (30-day TTL), then retry.', + }, + { status: 404 } + ); + } + + const data = + (await refResult.data.json()) as unknown as ConversationReferenceData; + const { reference, userName } = data; + + // Verify botAppId is configured + const botAppId = process.env.TEAMS_BOT_APP_ID; + if (!botAppId) { + ctx.logger.error('TEAMS_BOT_APP_ID not configured'); + return resp.json({ error: 'Bot configuration error' }, { status: 500 }); + } + + // Send proactive message using stored conversation reference + await adapter.continueConversationAsync( + botAppId, + reference, + async (turnContext: TurnContext) => { + await turnContext.sendActivity(text); + } + ); + + ctx.logger.info('Proactive message sent successfully', { + userKey, + }); + + // Ping Checkly on success + await handleSuccess( + ctx, + 'example-teams', + process.env.CHECKLY_EXAMPLE_TEAMS_URL + ); + + return resp.json({ + success: true, + message: 'Proactive message sent', + userKey, + userName, + }); + } catch (error) { + const err = error as Error; + + // Handle user blocked/uninstalled bot + if ('statusCode' in err && err.statusCode === 403) { 
+ ctx.logger.warn('User has blocked or uninstalled the bot', { + error: err.message, + }); + return resp.json( + { + error: 'User has blocked or uninstalled the bot', + details: err.message, + }, + { status: 403 } + ); + } + + // Handle other errors + ctx.logger.error('Failed to send proactive message', { + error: err.message, + stack: err.stack, + }); + + return resp.json( + { + error: 'Failed to send proactive message', + details: err.message, + }, + { status: 500 } + ); + } +} diff --git a/src/agents/example-telegram/README.md b/src/agents/example-telegram/README.md new file mode 100644 index 0000000..ac76280 --- /dev/null +++ b/src/agents/example-telegram/README.md @@ -0,0 +1,116 @@ +# Steps to create and configure the Telegram integration + +## 1. Deploy and Get API Endpoint URL + +Deploy the Kitchen Sink project to the Agentuity cloud: +```bash +agentuity deploy +``` + +Open the `example-telegram` agent in the [Agentuity web app](https://app.agentuity.com/). Click on the API IO, ensure it's set to `Public`, and copy the API endpoint URL: + +![Agentuity agent showing API endpoint URL configuration](/.github/example-telegram/telegram-setup-1-api-endpoint.png) + +## 2. Create Telegram Bot with BotFather + +Open Telegram and start a conversation with [@BotFather](https://t.me/BotFather). This is the official bot for creating and managing Telegram bots: + +![BotFather conversation showing available commands](/.github/example-telegram/telegram-setup-2-bot-father.png) + +Send `/newbot` to create a new bot and follow the prompts to: +- **Choose a name** for your bot (this appears in contact details) +- **Choose a username** for your bot (must end in 'bot', e.g. `my_kitchen_sink_bot`) + +BotFather will provide you with a **Bot Token**. 
Copy this token - you'll need it for the next step: + +![BotFather providing bot token after successful creation](/.github/example-telegram/telegram-setup-3-token.png) + +> **Important:** Keep your bot token secure and never share it publicly. Anyone with this token can control your bot. + +## 3. Configure Environment Variables + +Set the bot token in your Agentuity project: + +For local development, add to your `.env` file: +```env +TELEGRAM_BOT_TOKEN=1234567890:ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghi +``` + +For production deployment: +```bash +agentuity env set --secret TELEGRAM_BOT_TOKEN your-bot-token-here +``` + +## 4. Set Up Webhook + +Configure your bot to receive updates via webhook using the Telegram Bot API. You can do this with a simple HTTP request: + +```bash +curl -X POST "https://api.telegram.org/bot<YOUR_BOT_TOKEN>/setWebhook" \ + -H "Content-Type: application/json" \ + -d '{"url": "YOUR_API_ENDPOINT_URL"}' +``` + +Replace: +- `<YOUR_BOT_TOKEN>` with your bot token from step 2 +- `YOUR_API_ENDPOINT_URL` with the URL from step 1 + +You should receive a response like: +```json +{"ok":true,"result":true,"description":"Webhook was set"} +``` + +> **Alternative:** You can also set the webhook programmatically or use tools like Postman for the API call. + +## 5. Test Your Bot + +Add your bot to a Telegram chat: + +1. **Find your bot** by searching for its username in Telegram +2. **Start a conversation** by clicking "Start" or sending `/start` +3. **Send a message** and verify the bot responds with AI-generated replies + +## What This Agent Does + +This agent demonstrates Telegram bot integration with AI-powered conversations. It: + +1. **Receives webhook updates** from Telegram when you send a message +2. **Maintains conversation history** using Agentuity's built-in KV storage (last 10 messages per chat) +3. **Generates AI responses** using OpenAI's GPT models with conversation context +4.
**Sends replies** back to Telegram using the Bot API with proper message threading + +### Supported Chat Types + +The bot works in: +- **Private chats** - Direct messages with users +- **Group chats** - Public group conversations +- **Supergroups** - Large group conversations with topics support +- **Channels** - Broadcast channels (if added as admin) + +## Technical Implementation + +### Webhook Processing + +Telegram sends webhook payloads as [Update objects](https://core.telegram.org/bots/api#update), which contain: +- `update_id` - Unique update identifier +- `message` - Message object with text, sender info, and chat details +- Additional fields for other update types (edited messages, inline queries, etc.) + +### API Integration + +The agent uses Telegram's [sendMessage](https://core.telegram.org/bots/api#sendmessage) method with: +- **Modern reply format**: `reply_parameters: { message_id }` +- **Plain text messages**: No `parse_mode` to avoid API errors from unescaped special characters in AI-generated content +- **Error handling**: Proper API error response handling + +## Rate Limits and Best Practices + +### Telegram Rate Limits +- **1 message per second** per chat +- **20 messages per minute** per group +- **30 messages per second** globally for bulk sends + +### Best Practices +- Always return `200 OK` from webhook endpoints +- Process messages asynchronously to avoid webhook timeouts +- Filter bot messages to prevent infinite loops diff --git a/src/agents/example-telegram/index.ts b/src/agents/example-telegram/index.ts new file mode 100644 index 0000000..6f85c71 --- /dev/null +++ b/src/agents/example-telegram/index.ts @@ -0,0 +1,111 @@ +import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; +import { openai } from '@ai-sdk/openai'; +import { generateText } from 'ai'; +import { sendTelegramMessage, type TelegramUpdate } from './telegram'; + +export default async function Agent( + req: AgentRequest, + resp: AgentResponse, + ctx: 
AgentContext +) { + try { + // No manual trigger handling + if (req.trigger === 'manual') { + return resp.text('This agent only responds to Telegram triggers.'); + } + + // Parse webhook update + const update = await req.data.object(); + + // Filter non-text messages and bot messages + if (!update?.message?.text || update.message.from?.is_bot) { + return resp.text('OK'); + } + + // Telegram wants a fast 200 OK response, so we return that immediately + const response = resp.text('OK'); + + // Use waitUntil to process message in the background + ctx.waitUntil(async () => { + try { + if (!update.message) return; + + const chatId = update.message.chat.id; + const chatKey = `telegram-chat-${chatId}`; + + // Get conversation history from KV + type MessageHistory = Array<{ + role: 'user' | 'assistant'; + content: string; + timestamp: number; + }>; + + let messages: MessageHistory = []; + + try { + const historyResult = await ctx.kv.get('kitchen-sink', chatKey); + + if (historyResult.exists) { + messages = (await historyResult.data.json()) as MessageHistory; + } + } catch (error) { + ctx.logger.error('Error retrieving chat history:', error); + } + + // Add new message to history + messages.push({ + role: 'user', + content: update.message.text!, + timestamp: update.message.date, // Unix timestamp from Telegram API + }); + + // Keep last 10 messages + if (messages.length > 10) { + messages.splice(0, messages.length - 10); + } + + // Store updated history + await ctx.kv.set('kitchen-sink', chatKey, messages, { ttl: 86400 }); + + // Generate AI response + const result = await generateText({ + model: openai('gpt-5-mini'), + system: + 'You are a helpful Telegram bot assistant. 
Keep responses concise and friendly.', + messages: messages.map((m) => ({ role: m.role, content: m.content })), + }); + + // Send reply + const token = process.env.TELEGRAM_BOT_TOKEN!; + await sendTelegramMessage( + token, + chatId, + result.text, + ctx, + update.message.message_id + ); + + // Add bot response to history + messages.push({ + role: 'assistant', + content: result.text, + timestamp: Math.floor(Date.now() / 1000), // Unix timestamp to match Telegram API format + }); + await ctx.kv.set('kitchen-sink', chatKey, messages, { ttl: 86400 }); + } catch (error) { + ctx.logger.error('Error processing Telegram message:', error); + // Error contained in background task - let it complete gracefully without throwing + } + }); + + return response; + } catch (error) { + ctx.logger.error('Error running agent:', error); + return new Response('Internal Server Error', { status: 500 }); + } +} + +export const welcome = () => ({ + welcome: `Welcome to the Telegram example agent.\n\n### About\n\nThis agent demonstrates how to create a Telegram bot that can have AI-powered conversations using webhooks.\n\n### Testing\n\nTesting is not available in DevMode for this agent. 
Add your bot to a Telegram chat to test.\n\n### Questions?\n\nThe "Help" command is not available for this agent, as it's a platform-specific example.`, + prompts: [], +}); diff --git a/src/agents/example-telegram/telegram.ts b/src/agents/example-telegram/telegram.ts new file mode 100644 index 0000000..151947c --- /dev/null +++ b/src/agents/example-telegram/telegram.ts @@ -0,0 +1,57 @@ +import type { AgentContext } from '@agentuity/sdk'; + +// Minimal type definitions +export interface TelegramUpdate { + update_id: number; + message?: TelegramMessage; +} + +export interface TelegramMessage { + message_id: number; + message_thread_id?: number; // For supergroups with topics + date: number; + chat: { + id: number; + type: 'private' | 'group' | 'supergroup' | 'channel'; + }; + from?: { + id: number; + is_bot: boolean; + first_name: string; + username?: string; + }; + text?: string; + reply_to_message?: TelegramMessage; // For handling reply chains +} + +// Send message via Telegram Bot API +export async function sendTelegramMessage( + botToken: string, + chatId: number, + text: string, + ctx: AgentContext, + replyToMessageId?: number +) { + const url = `https://api.telegram.org/bot${botToken}/sendMessage`; + + const response = await fetch(url, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + chat_id: chatId, + text, + // Use ReplyParameters (with backward compatibility) + ...(replyToMessageId && { + reply_parameters: { message_id: replyToMessageId }, + }), + }), + }); + + if (!response.ok) { + const error = await response.text(); + ctx.logger.error('Telegram API error:', error); + throw new Error(`Telegram API error: ${error}`); + } + + return response.json(); +} diff --git a/src/agents/gateway-byo-token/index.ts b/src/agents/gateway-byo-token/index.ts index ca9d8e8..cb4e447 100644 --- a/src/agents/gateway-byo-token/index.ts +++ b/src/agents/gateway-byo-token/index.ts @@ -58,7 +58,7 @@ export default async function 
Agent( export const welcome = () => { return { - welcome: `Welcome to the AI Gateway Bring-Your-Own-Token example agent.\n\n### About\n\nThe AI Gateway provides seamless access to multiple AI providers through a single interface. It automatically routes your LLM requests, tracks usage and costs, and eliminates the need to manage individual API keys for each provider.\n\nYou can, however, choose to _bring your own token_ instead of utilizing the AI Gateway. This allows you to use your own API keys for the AI providers, but means you won't have access to the same features as the AI Gateway, such as tracing and usage metrics.\n\n### Testing\n\nStart by saving your OpenAI API key in your .env file(s). Make sure to remove the key from your .env file when you're done testing and want to go back to using the AI Gateway.\n\nThen send a plain-text message with any content and we'll show you the response.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the AI Gateway Bring-Your-Own-Token example agent.\n\n### About\n\nThe AI Gateway provides seamless access to multiple AI providers through a single interface. It automatically routes your LLM requests, tracks usage and costs, and eliminates the need to manage individual API keys for each provider.\n\nYou can, however, choose to _bring your own token_ instead of utilizing the AI Gateway. This allows you to use your own API keys for the AI providers, but means you won't have access to the same features as the AI Gateway, such as tracing and usage metrics.\n\n### Testing\n\nStart by saving your OpenAI API key in your .env file(s). 
Make sure to remove the key from your .env file when you're done testing and want to go back to using the AI Gateway.\n\nThen send a plain-text message with any content and we'll show you the response.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [AI Gateway Guide](https://agentuity.dev/Guides/ai-gateway) for more info and best practices.`, prompts: [ { data: `Tell me a short story about AI agents`, diff --git a/src/agents/gateway-provider/index.ts b/src/agents/gateway-provider/index.ts index 8cb4aeb..5d2729c 100644 --- a/src/agents/gateway-provider/index.ts +++ b/src/agents/gateway-provider/index.ts @@ -36,7 +36,7 @@ export default async function Agent( // Google const resultGoogle = await generateText({ - model: google('gemini-2.0-flash-001'), + model: google('gemini-2.5-flash'), system: 'You are a fantastic storyteller. Your story should be 50 words or less, in markdown format.', prompt, @@ -47,7 +47,7 @@ export default async function Agent( '### OpenAI (GPT-5 Nano)\n\n' + resultOpenAI.text + '\n\n---\n\n' + - '### Google (Gemini 2.0 Flash)\n\n' + + '### Google (Gemini 2.5 Flash)\n\n' + resultGoogle.text ); } catch (error) { @@ -59,7 +59,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the AI Gateway Provider example agent.\n\n### About\n\nThe AI Gateway provides seamless access to multiple AI providers through a single interface. 
It automatically routes your LLM requests, tracks usage and costs, and eliminates the need to manage individual API keys for each provider.\n\n### Testing\n\nSend a plain-text message with any content and we'll show you the responses from two different providers.\n\nAfterwards, click on the session below to see what tracing is available, including usage and costs.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the AI Gateway Provider example agent.\n\n### About\n\nThe AI Gateway provides seamless access to multiple AI providers through a single interface. It automatically routes your LLM requests, tracks usage and costs, and eliminates the need to manage individual API keys for each provider.\n\n### Testing\n\nSend a plain-text message with any content and we'll show you the responses from two different providers.\n\nAfterwards, click on the session below to see what tracing is available, including usage and costs.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [AI Gateway Guide](https://agentuity.dev/Guides/ai-gateway) for more info and best practices.`, prompts: [ { data: `Tell me a short story about AI agents`, diff --git a/src/agents/handler-request/index.ts b/src/agents/handler-request/index.ts index 3bf3ffa..02b900f 100644 --- a/src/agents/handler-request/index.ts +++ b/src/agents/handler-request/index.ts @@ -80,7 +80,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Agent Handler Request example agent.\n\n### About\n\nData that is sent to your agent is transferred as raw binary data and the content type is provided to the agent through the request object.\n\nWe provide a few different ways to 
handle data formats in your agents to make it easier to work with different data types. Of course, your agent can always perform its own data handling by use the raw data and the content type property. However, most common data types are supported out of the box.\n\n### Testing\n\nSend a plain-text or JSON message with any content and we'll show you what the request looks like.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Agent Handler Request example agent.\n\n### About\n\nData that is sent to your agent is transferred as raw binary data and the content type is provided to the agent through the request object.\n\nWe provide a few different ways to handle data formats in your agents to make it easier to work with different data types. Of course, your agent can always perform its own data handling by use the raw data and the content type property. 
However, most common data types are supported out of the box.\n\n### Testing\n\nSend a plain-text or JSON message with any content and we'll show you what the request looks like.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Agent Data Handling Guide](https://agentuity.dev/Guides/agent-data-handling) for more info and best practices.`, prompts: [ { data: `Hello, world!`, diff --git a/src/agents/handler-response/index.ts b/src/agents/handler-response/index.ts index a452754..ef9447f 100644 --- a/src/agents/handler-response/index.ts +++ b/src/agents/handler-response/index.ts @@ -85,7 +85,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Agent Handler Response example agent.\n\n### About\n\nData that is sent to your agent is transferred as raw binary data and the content type is provided to the agent through the request object.\n\nWe provide a few different ways to handle data formats in your agents to make it easier to work with different data types. Of course, your agent can always perform its own data handling by use the raw data and the content type property. However, most common data types are supported out of the box.\n\n### Testing\n\nChoose one of the pre-set message options and we'll show you the appropriate response.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Agent Handler Response example agent.\n\n### About\n\nData that is sent to your agent is transferred as raw binary data and the content type is provided to the agent through the request object.\n\nWe provide a few different ways to handle data formats in your agents to make it easier to work with different data types. 
Of course, your agent can always perform its own data handling by use the raw data and the content type property. However, most common data types are supported out of the box.\n\n### Testing\n\nChoose one of the pre-set message options and we'll show you the appropriate response.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Agent Data Handling Guide](https://agentuity.dev/Guides/agent-data-handling) for more info and best practices.`, prompts: [ { data: `Image`, diff --git a/src/agents/io-email/index.ts b/src/agents/io-email/index.ts index 9ebd336..92a5453 100644 --- a/src/agents/io-email/index.ts +++ b/src/agents/io-email/index.ts @@ -1,5 +1,5 @@ import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; -import { handleError, handleHelpMessage } from '../../lib/utils'; +import { handleError, handleHelpMessage } from '../../lib/utils'; // TODO: add handleSuccess here when SDK method is available export default async function Agent( req: AgentRequest, @@ -53,6 +53,10 @@ ${textBody}`,
${htmlBody || `
${textBody}
`}
`, }); + // TODO: Ping Checkly on successful email processing (blocked by missing SDK method) + // NOTE: This works for ANY email received (manual or automated) + // await handleSuccess(ctx, 'io-email', process.env.CHECKLY_IO_EMAIL_URL); + return resp.text('Email processed and reply sent'); } } catch (error) { diff --git a/src/agents/io-sms/index.ts b/src/agents/io-sms/index.ts index 50dc234..86c2e99 100644 --- a/src/agents/io-sms/index.ts +++ b/src/agents/io-sms/index.ts @@ -35,6 +35,10 @@ export default async function Agent( `You sent an SMS with the following message:\n\n"${message}"\n\nFrom: ${phoneNumber}` ); + // TODO: Enable Checkly monitoring once `ctx.sendSMS()` SDK method is available + // This allows test-suite to trigger io-sms agent for automated testing: + // await handleSuccess(ctx, 'io-sms', process.env.CHECKLY_IO_SMS_URL); + return resp.text('SMS processed and reply sent'); } } catch (error) { diff --git a/src/agents/kitchen-sink/index.ts b/src/agents/kitchen-sink/index.ts index 17b7cbe..246b9f2 100644 --- a/src/agents/kitchen-sink/index.ts +++ b/src/agents/kitchen-sink/index.ts @@ -15,7 +15,7 @@ import { z } from 'zod'; export const welcome = () => { return { - welcome: `Welcome to the Kitchen Sink project, a showcase of Agentuity's SDK functionality.\n\nSelect an agent to learn more about each feature and test various scenarios. We suggest you follow along with the code for each of the agents to understand how each feature works side-by-side with the requests/responses.\n\nIf this is your first time here, start with the Handler agents.\n\n### Questions?\n\nYou can come back to this kitchen-sink agent at any time to chat with our expert agent, or find out more about specific features by sending "help" as plain-text to one of the other agents.`, + welcome: `Welcome to the Kitchen Sink project, a showcase of Agentuity's SDK functionality.\n\nSelect an agent to learn more about each feature and test various scenarios. 
We suggest you follow along with the code for each of the agents to understand how each feature works side-by-side with the requests/responses.\n\nIf this is your first time here, start with the Handler agents.\n\nWatch the demo: [Kitchen Sink Walkthrough](https://youtu.be/gcxqdMWY-x4)\n\n### Questions?\n\nYou can come back to this kitchen-sink agent at any time to chat with our expert agent, or find out more about specific features by sending "help" as plain-text to one of the other agents.`, prompts: [ { data: `Can you tell me more about...`, diff --git a/src/agents/observability-logging/index.ts b/src/agents/observability-logging/index.ts index 5ea9191..1636d4c 100644 --- a/src/agents/observability-logging/index.ts +++ b/src/agents/observability-logging/index.ts @@ -57,7 +57,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Observability Logging example agent.\n\n### About\n\nLogging provides structured, real-time insights into your agent's execution. Use different log levels to categorize messages and include context for easier debugging.\n\n### Testing\n\nSend a plain-text or JSON message and we'll log it at various levels. Check your Agentuity logs below to see the output.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Observability Logging example agent.\n\n### About\n\nLogging provides structured, real-time insights into your agent's execution. Use different log levels to categorize messages and include context for easier debugging.\n\n### Testing\n\nSend a plain-text or JSON message and we'll log it at various levels. 
Check your Agentuity logs below to see the output.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Agent Logging Guide](https://agentuity.dev/Guides/agent-logging) for more info and best practices.`, prompts: [ { data: `Testing agent logging`, diff --git a/src/agents/observability-tracing/index.ts b/src/agents/observability-tracing/index.ts index 7cc7870..d4ffe11 100644 --- a/src/agents/observability-tracing/index.ts +++ b/src/agents/observability-tracing/index.ts @@ -87,7 +87,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Observability Tracing example agent.\n\n### About\n\nTracing provides deep visibility into your agent's execution flow using OpenTelemetry. Create spans to track operations, add attributes for context, and record events to mark important moments.\n\n### Testing\n\nSend a plain-text or JSON message and we'll create traced spans with attributes and events. Check your Agentuity session timeline to see the traces!\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Observability Tracing example agent.\n\n### About\n\nTracing provides deep visibility into your agent's execution flow using OpenTelemetry. Create spans to track operations, add attributes for context, and record events to mark important moments.\n\n### Testing\n\nSend a plain-text or JSON message and we'll create traced spans with attributes and events. 
Check your Agentuity session timeline to see the traces!\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Agent Tracing Guide](https://agentuity.dev/Guides/agent-tracing) for more info and best practices.`, prompts: [ { data: `Testing agent tracing`, diff --git a/src/agents/storage-key-value/index.ts b/src/agents/storage-key-value/index.ts index 948951e..b5da918 100644 --- a/src/agents/storage-key-value/index.ts +++ b/src/agents/storage-key-value/index.ts @@ -90,7 +90,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Key-Value Store example agent.\n\n### About\n\nKey-value storage is your go-to solution for fast, ephemeral data that agents need to access quickly. Think of it as your agent's short-term memory — perfect for session state, configuration, caching, and temporary data.\n\n### Testing\n\nSend a plain-text or JSON message with any content and we'll store it, retrieve it, delete it, and return the value in a response to you.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Key-Value Store example agent.\n\n### About\n\nKey-value storage is your go-to solution for fast, ephemeral data that agents need to access quickly. 
Think of it as your agent's short-term memory — perfect for session state, configuration, caching, and temporary data.\n\n### Testing\n\nSend a plain-text or JSON message with any content and we'll store it, retrieve it, delete it, and return the value in a response to you.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Key-Value Storage Guide](https://agentuity.dev/Guides/key-value) for more info and best practices.`, prompts: [ { data: `Hello, world!`, diff --git a/src/agents/storage-object-store/index.ts b/src/agents/storage-object-store/index.ts index 97176c5..d6de9a3 100644 --- a/src/agents/storage-object-store/index.ts +++ b/src/agents/storage-object-store/index.ts @@ -21,7 +21,7 @@ export default async function Agent( * Examples * ************/ - const bucket = 'kitchen-sink'; // You must create the bucket and select a provider first + const bucket = 'kitchen-sink'; // Buckets are auto-created if they don't exist const key = `storage-object-store-${Date.now()}`; const prompt = await req.data.text(); @@ -41,7 +41,7 @@ export default async function Agent( }); } catch (_error) { return resp.text( - 'Make sure you have created the bucket and selected a provider first.' + 'Unable to store data. Please verify your storage provider is configured. You can view object storage buckets under Infrastructure → Object Storage.' ); } @@ -76,7 +76,7 @@ export default async function Agent( }); } catch (_error) { return resp.text( - 'Make sure you have created the bucket and selected a provider first.' + 'Unable to store data. Please verify your storage provider is configured. You can view object storage buckets under Infrastructure → Object Storage.' 
); } @@ -116,19 +116,25 @@ export default async function Agent( ); } catch (_error) { return resp.text( - 'Make sure you have created the bucket and selected a provider first.' + 'Unable to store data. Please verify your storage provider is configured. You can view object storage buckets under Infrastructure → Object Storage.' ); } - const publicUrl = await ctx.objectstore.createPublicURL( - bucket, - key, - 60 * 1000 // 1 minute, optional, defaults to 1 hour - ); + try { + const publicUrl = await ctx.objectstore.createPublicURL( + bucket, + key, + 60 * 1000 // 1 minute, optional, defaults to 1 hour + ); - return resp.markdown( - `You can access the markdown file via this link for the next minute:\n\n${publicUrl}` - ); + return resp.markdown( + `You can access the markdown file via this link for the next minute:\n\n${publicUrl}` + ); + } catch (_error) { + return resp.text( + 'Data stored successfully, but unable to create a public URL.' + ); + } } catch (error) { ctx.logger.error('Error running agent:', error); @@ -141,7 +147,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Object Store example agent.\n\n### About\n\nObject storage is your solution for storing files, media, and large unstructured data that agents need to manage. Think of it as your agent's file system — perfect for documents, images, videos, backups, and any binary content.\n\n### Testing\n\nChoose one of the pre-set message options and we'll store it, retrieve it, delete it, and return the value in a response to you. 
If you choose "Public URL", we'll provide a link you can use to access the file.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Object Store example agent.\n\n### About\n\nObject storage is your solution for storing files, media, and large unstructured data that agents need to manage. Think of it as your agent's file system — perfect for documents, images, videos, backups, and any binary content.\n\n### Testing\n\nChoose one of the pre-set message options and we'll store it, retrieve it, delete it, and return the value in a response to you. If you choose "Public URL", we'll provide a link you can use to access the file.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Object Storage Guide](https://agentuity.dev/Guides/object-storage) for more info and best practices.`, prompts: [ { data: `Plain-Text`, diff --git a/src/agents/storage-vector/index.ts b/src/agents/storage-vector/index.ts index b763351..0b58375 100644 --- a/src/agents/storage-vector/index.ts +++ b/src/agents/storage-vector/index.ts @@ -133,7 +133,7 @@ export default async function Agent( export const welcome = () => { return { - welcome: `Welcome to the Vector Storage example agent.\n\n### About\n\nVector storage enables semantic search for your agents, allowing them to find information by meaning rather than keywords. Ideal for knowledge bases, RAG systems, and persistent agent memory.\n\n### Testing\n\nChoose one of the pre-set message options and we'll search a sample database of office chairs, showing you the most relevant matches and a recommendation. 
You'll notice that searching for "budget" chairs, for example, also return results for "affordable" and "cheap" chairs.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.`, + welcome: `Welcome to the Vector Storage example agent.\n\n### About\n\nVector storage enables semantic search for your agents, allowing them to find information by meaning rather than keywords. Ideal for knowledge bases, RAG systems, and persistent agent memory.\n\n### Testing\n\nChoose one of the pre-set message options and we'll search a sample database of office chairs, showing you the most relevant matches and a recommendation. You'll notice that searching for "budget" chairs, for example, also returns results for "affordable" and "cheap" chairs.\n\n### Questions?\n\nYou can type "help" at any time to learn more about the capabilities of this feature, or chat with our expert agent by selecting the kitchen-sink agent.\n\n### Learn More\n\nCheck out our [Vector Database Guide](https://agentuity.dev/Guides/vector-db) for more info and best practices.`, prompts: [ { data: `comfortable office chair`, diff --git a/src/agents/test-suite/index.ts b/src/agents/test-suite/index.ts index 4d17d14..d0b79a8 100644 --- a/src/agents/test-suite/index.ts +++ b/src/agents/test-suite/index.ts @@ -1,9 +1,11 @@ import type { AgentContext, AgentRequest, AgentResponse } from '@agentuity/sdk'; import { WebClient } from '@slack/web-api'; -import { handleError } from '../../lib/utils'; +import { handleError, handleSuccess } from '../../lib/utils'; import { welcome as welcomeExampleChat } from '../example-chat'; import { welcome as welcomeExampleComposio } from '../example-composio'; import { welcome as welcomeExampleDiscord } from '../example-discord'; +import { welcome as welcomeExampleLLMJudge } from '../example-llm-judge'; +import { welcome as welcomeExampleStreaming } from 
'../example-streaming'; import { welcome as welcomeFrameworksProvider } from '../frameworks-provider'; import { welcome as welcomeGatewayByoToken } from '../gateway-byo-token'; import { welcome as welcomeGatewayProvider } from '../gateway-provider'; @@ -50,6 +52,8 @@ export default async function Agent( { name: 'example-chat', welcome: welcomeExampleChat }, { name: 'example-composio', welcome: welcomeExampleComposio }, { name: 'example-discord', welcome: welcomeExampleDiscord }, + { name: 'example-llm-judge', welcome: welcomeExampleLLMJudge }, + { name: 'example-streaming', welcome: welcomeExampleStreaming }, { name: 'frameworks-provider', welcome: welcomeFrameworksProvider }, { name: 'gateway-byo-token', welcome: welcomeGatewayByoToken }, { name: 'gateway-provider', welcome: welcomeGatewayProvider }, @@ -65,6 +69,8 @@ export default async function Agent( { name: 'storage-object-store', welcome: welcomeStorageObjectStore }, { name: 'storage-vector', welcome: welcomeStorageVector }, // { name: 'example-slack', welcome: welcomeExampleSlack }, + // { name: 'example-teams', welcome: welcomeExampleTeams }, + // { name: 'example-telegram', welcome: welcomeExampleTelegram }, // { name: 'io-cron', welcome: welcomeIOCron }, // { name: 'io-email', welcome: welcomeIOEmail }, // { name: 'io-sms', welcome: welcomeIOSMS }, @@ -95,27 +101,51 @@ export default async function Agent( // Agents without prompts // io-cron - // The request of this agent comes via inbound cron IO automatically, nothing to do here + // The request of this agent comes via inbound cron IO automatically; nothing to do here // io-email - // The response of this agent is sent via outbound email IO automatically, nothing to do here + // The response of this agent is sent via outbound email IO automatically; nothing to do here // io-sms - // The response of this agent is sent via outbound SMS IO automatically, nothing to do here + // The response of this agent is sent via outbound SMS IO automatically; nothing 
to do here + + // example-telegram + // Telegram bots cannot initiate conversations and the agent filters bot messages; nothing to do here // example-slack const slack = new WebClient(process.env.SLACK_USER_TOKEN); - slack.chat.postMessage({ + await slack.chat.postMessage({ channel: process.env.SLACK_CHANNEL_TEST || '', text: `Daily test message to <@${process.env.SLACK_BOT_ID}>. Please respond with a simple "Hello" to confirm you're working.`, }); - // Tests complete - slack.chat.postMessage({ - channel: process.env.SLACK_CHANNEL_ALERTS || '', - text: `✅ Kitchen Sink test suite completed successfully.`, - }); + // example-teams - Proactive messaging + try { + const teamsUserKey = process.env.TEAMS_TEST_USER_KEY; + + if (teamsUserKey) { + ctx.logger.info('Testing example-teams proactive messaging'); + + const teamsAgent = await ctx.getAgent({ name: 'example-teams' }); + + await teamsAgent.run({ + data: JSON.stringify({ + userKey: teamsUserKey, + text: 'Daily test message from test-suite. 
Automated testing is working!', + }), + contentType: 'application/json', + }); + } else { + ctx.logger.warn('TEAMS_TEST_USER_KEY not set, skipping Teams test'); + } + } catch (error) { + ctx.logger.error('Teams test failed', error); + // Don't fail entire test suite if Teams test fails + } + + // Tests complete - ping Checkly on success + await handleSuccess(ctx, 'test-suite', process.env.CHECKLY_TEST_SUITE_URL); ctx.logger.info('Test completed successfully'); diff --git a/src/lib/utils.ts b/src/lib/utils.ts index 7aef9fa..e492c2d 100644 --- a/src/lib/utils.ts +++ b/src/lib/utils.ts @@ -112,3 +112,40 @@ export const handleError = (agent: string, prompt?: number) => { }) .catch((error) => console.error('Error sending Slack message:', error)); }; + +export const handleSuccess = async ( + ctx: AgentContext, + source: string, + heartbeatUrl?: string +) => { + if (!heartbeatUrl) { + const error = `No heartbeat URL configured for ${source}`; + ctx.logger.error(error); + throw new Error(error); + } + + try { + const response = await fetch(heartbeatUrl, { + method: 'GET', + headers: { Origin: source }, + signal: AbortSignal.timeout(5000), // 5 second timeout + }); + + if (!response.ok) { + throw new Error( + `Heartbeat ping returned status ${response.status} for ${source}. Checkly URL: ${heartbeatUrl}` + ); + } + } catch (error) { + const errorName = error instanceof Error ? error.name : 'Unknown'; + const errorMessage = error instanceof Error ? error.message : String(error); + + if (errorName === 'TimeoutError' || errorName === 'AbortError') { + throw new Error( + `Heartbeat request timed out for ${source} after 5 seconds` + ); + } + + throw new Error(`Heartbeat request failed for ${source}: ${errorMessage}`); + } +};