From c16b23b3a087fbbfa26d4c49b5d69a84f77f9663 Mon Sep 17 00:00:00 2001 From: huimiu Date: Tue, 12 Aug 2025 16:19:53 +0800 Subject: [PATCH 01/21] feat: add number guessing workflow with custom agent executors --- samples/workflows/simple_workflow.py | 160 +++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 samples/workflows/simple_workflow.py diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py new file mode 100644 index 0000000..c5e2e36 --- /dev/null +++ b/samples/workflows/simple_workflow.py @@ -0,0 +1,160 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from agent_framework import ChatMessage, ChatRole +from agent_framework._agents import ChatClientAgent +from agent_framework.workflow import ( + AgentRunEvent, + WorkflowBuilder, + WorkflowCompletedEvent, +) +from agent_framework_foundry._chat_client import FoundryChatClient +from agent_framework_workflow._executor import ( + AgentExecutor, + AgentExecutorRequest, + AgentExecutorResponse, + handler, +) +from agent_framework_workflow._workflow_context import WorkflowContext +from azure.ai.projects.aio import AIProjectClient +from azure.identity.aio import AzureCliCredential + +""" +The following sample demonstrates a basic workflow with two executors +where one executor guesses a number and the other executor judges the +guess iteratively. This version uses AgentExecutor with specific +instructions to implement the logic through natural language rather +than hardcoded algorithms. 
+""" + + +class GuessAgentExecutor(AgentExecutor): + """Custom AgentExecutor for the guessing agent that can handle both requests and responses.""" + + @handler(output_types=[AgentExecutorResponse]) + async def handle_start_message(self, message: str, ctx: WorkflowContext) -> None: + """Handle the initial start message and convert it to a request for the guesser.""" + + chat_message = ChatMessage(ChatRole.USER, text=message) + request = AgentExecutorRequest(messages=[chat_message], should_respond=True) + await self.run(request, ctx) + + @handler(output_types=[AgentExecutorResponse]) + async def handle_judge_response( + self, response: AgentExecutorResponse, ctx: WorkflowContext + ) -> None: + """Handle response from the judge and convert it to a request for the guesser.""" + + messages = response.agent_run_response.messages + if messages and messages[-1].text.lower().strip() == "correct": + await ctx.add_event( + WorkflowCompletedEvent( + data="Number guessing game completed successfully!" 
+ ) + ) + return + + request = AgentExecutorRequest(messages=messages, should_respond=True) + await self.run(request, ctx) + + +class JudgeAgentExecutor(AgentExecutor): + """Custom AgentExecutor for the judging agent that can handle both requests and responses.""" + + @handler(output_types=[AgentExecutorResponse]) + async def handle_guess_response( + self, response: AgentExecutorResponse, ctx: WorkflowContext + ) -> None: + """Handle response from the guesser and convert it to a request for the judge.""" + + messages = response.agent_run_response.messages + request = AgentExecutorRequest(messages=messages, should_respond=True) + await self.run(request, ctx) + + +async def main(): + """Main function to run the workflow.""" + + credential = AzureCliCredential() + client = AIProjectClient( + endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential + ) + guess_agent = await client.agents.create_agent( + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="GuessAgent" + ) + + judge_agent = await client.agents.create_agent( + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="JudgeAgent" + ) + + try: + # Step 1: Create agent-based executors with specific instructions + guess_number_executor = GuessAgentExecutor( + ChatClientAgent( + chat_client=FoundryChatClient(client=client, agent_id=guess_agent.id), + instructions=( + "You are a number guessing agent. Your task is to guess a number between 1 and 100 using binary search strategy. " + "Binary search strategy: " + "1. Initial range: lower_bound=1, upper_bound=100 " + "2. Always guess the midpoint of the current range: (lower_bound + upper_bound) // 2 " + "3. Adjust the range based on feedback: " + " - If guess is 'too low' or 'below target', set lower_bound = current_guess + 1 " + " - If guess is 'too high' or 'above target', set upper_bound = current_guess - 1 " + " - If guess is 'correct' or 'matched', you've found the answer! " + "4. 
Repeat until you find the correct answer " + "When you receive 'start' or any initial message, make your first guess as 50 (midpoint of 1-100). " + "Always respond with just the integer number you're guessing." + ), + ), + id="guesser", + ) + + judge_number_executor = JudgeAgentExecutor( + ChatClientAgent( + chat_client=FoundryChatClient(client=client, agent_id=judge_agent.id), + instructions=( + "You are a number judging agent. Your target number is 30. " + "When you receive a guessed number, compare it to your target (30) and respond with exactly: " + "- 'correct' if the guess equals 30 " + "- 'too low' if the guess is less than 30 " + "- 'too high' if the guess is greater than 30 " + "Always respond with only these exact phrases, nothing more." + ), + ), + id="judge", + ) + + # Step 2: Build the workflow with the defined edges. + workflow = ( + WorkflowBuilder() + .add_edge(guess_number_executor, judge_number_executor) + .add_edge(judge_number_executor, guess_number_executor) + .set_start_executor(guess_number_executor) + .build() + ) + + # Step 3: Run the workflow and let agents decide when to complete. 
+ async for event in workflow.run_streaming("start"): + if isinstance(event, AgentRunEvent): + print(f"{event.executor_id}: {event.data}") + elif isinstance(event, WorkflowCompletedEvent): + print(f"šŸŽ‰ {event.data}") + break + + finally: + # Cleanup: Delete agents and close clients + try: + if guess_agent: + await client.agents.delete_agent(guess_agent.id) + if judge_agent: + await client.agents.delete_agent(judge_agent.id) + await client.close() + await credential.close() + except Exception: + pass + + +if __name__ == "__main__": + asyncio.run(main()) From 5f7b0cc8f2073e2738d509fab4bb1729debf118c Mon Sep 17 00:00:00 2001 From: huimiu Date: Tue, 12 Aug 2025 17:35:25 +0800 Subject: [PATCH 02/21] feat: enhance guessing and judging agent instructions for clarity and efficiency --- samples/workflows/simple_workflow.py | 61 ++++++++++++++++++---------- 1 file changed, 40 insertions(+), 21 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index c5e2e36..76ebff8 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -94,19 +94,30 @@ async def main(): guess_number_executor = GuessAgentExecutor( ChatClientAgent( chat_client=FoundryChatClient(client=client, agent_id=guess_agent.id), - instructions=( - "You are a number guessing agent. Your task is to guess a number between 1 and 100 using binary search strategy. " - "Binary search strategy: " - "1. Initial range: lower_bound=1, upper_bound=100 " - "2. Always guess the midpoint of the current range: (lower_bound + upper_bound) // 2 " - "3. Adjust the range based on feedback: " - " - If guess is 'too low' or 'below target', set lower_bound = current_guess + 1 " - " - If guess is 'too high' or 'above target', set upper_bound = current_guess - 1 " - " - If guess is 'correct' or 'matched', you've found the answer! " - "4. 
Repeat until you find the correct answer " - "When you receive 'start' or any initial message, make your first guess as 50 (midpoint of 1-100). " - "Always respond with just the integer number you're guessing." - ), + instructions="""You are a number guessing agent playing a guessing game. I need to find a number between 1 and 100. + + IMPORTANT RULES: + 1. NEVER repeat the same guess twice + 2. Use binary search strategy to be efficient + 3. Always respond with ONLY the number, nothing else + 4. Keep track of what you've learned from previous guesses + + BINARY SEARCH STRATEGY: + - Start with middle of current range + - If 'too low': the number is higher, so guess higher + - If 'too high': the number is lower, so guess lower + - Always eliminate half the remaining possibilities + + EXAMPLE SEQUENCE (target is 30): + Range 1-100: guess 50 → 'too high' → range becomes 1-49 + Range 1-49: guess 25 → 'too low' → range becomes 26-49 + Range 26-49: guess 37 → 'too high' → range becomes 26-36 + Range 26-36: guess 31 → 'too high' → range becomes 26-30 + Range 26-30: guess 28 → 'too low' → range becomes 29-30 + Range 29-30: guess 30 → 'correct' + + CRITICAL: If you just guessed 25 and got 'too high', your next guess must be LOWER than 25! + Think step by step about your range and pick the middle of the valid range.""", ), id="guesser", ) @@ -114,14 +125,22 @@ async def main(): judge_number_executor = JudgeAgentExecutor( ChatClientAgent( chat_client=FoundryChatClient(client=client, agent_id=judge_agent.id), - instructions=( - "You are a number judging agent. Your target number is 30. " - "When you receive a guessed number, compare it to your target (30) and respond with exactly: " - "- 'correct' if the guess equals 30 " - "- 'too low' if the guess is less than 30 " - "- 'too high' if the guess is greater than 30 " - "Always respond with only these exact phrases, nothing more." - ), + instructions="""You are a number judging agent. The secret number you're thinking of is 30. 
+ Your job is to compare each guess to 30 and give feedback. + + RESPONSE RULES - respond with EXACTLY these phrases: + • If the guess is less than 30: say 'too low' + • If the guess is greater than 30: say 'too high' + • If the guess equals 30: say 'correct' + + EXAMPLES: + Guess 15 → '15 < 30' → respond 'too low' + Guess 25 → '25 < 30' → respond 'too low' + Guess 35 → '35 > 30' → respond 'too high' + Guess 45 → '45 > 30' → respond 'too high' + Guess 30 → '30 = 30' → respond 'correct' + + IMPORTANT: Only respond with the three exact phrases above. Nothing else.""", ), id="judge", ) From 581082abc93a37febe78e9f13457927fbce8714e Mon Sep 17 00:00:00 2001 From: huimiu Date: Tue, 12 Aug 2025 23:36:51 +0800 Subject: [PATCH 03/21] feat: refactor guessing game workflow to implement student-teacher interaction with clear roles and instructions --- samples/workflows/simple_workflow.py | 152 ++++++++++----------------- 1 file changed, 58 insertions(+), 94 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index 76ebff8..0ad55df 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -21,154 +21,118 @@ from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import AzureCliCredential -""" -The following sample demonstrates a basic workflow with two executors -where one executor guesses a number and the other executor judges the -guess iteratively. This version uses AgentExecutor with specific -instructions to implement the logic through natural language rather -than hardcoded algorithms. 
-""" + +class StudentAgentExecutor(AgentExecutor): + @handler(output_types=[AgentExecutorResponse]) + async def handle_teacher_question( + self, response: AgentExecutorResponse, ctx: WorkflowContext + ) -> None: + messages = response.agent_run_response.messages + request = AgentExecutorRequest(messages=messages, should_respond=True) + await self.run(request, ctx) -class GuessAgentExecutor(AgentExecutor): - """Custom AgentExecutor for the guessing agent that can handle both requests and responses.""" +class TeacherAgentExecutor(AgentExecutor): + def __init__(self, agent, id="teacher"): + super().__init__(agent, id=id) + self.turn_count = 0 @handler(output_types=[AgentExecutorResponse]) async def handle_start_message(self, message: str, ctx: WorkflowContext) -> None: - """Handle the initial start message and convert it to a request for the guesser.""" - chat_message = ChatMessage(ChatRole.USER, text=message) request = AgentExecutorRequest(messages=[chat_message], should_respond=True) await self.run(request, ctx) @handler(output_types=[AgentExecutorResponse]) - async def handle_judge_response( + async def handle_student_answer( self, response: AgentExecutorResponse, ctx: WorkflowContext ) -> None: - """Handle response from the judge and convert it to a request for the guesser.""" + self.turn_count += 1 - messages = response.agent_run_response.messages - if messages and messages[-1].text.lower().strip() == "correct": + if self.turn_count >= 5: await ctx.add_event( WorkflowCompletedEvent( - data="Number guessing game completed successfully!" + data="Student-teacher conversation completed after 5 turns!" 
) ) return - request = AgentExecutorRequest(messages=messages, should_respond=True) - await self.run(request, ctx) - - -class JudgeAgentExecutor(AgentExecutor): - """Custom AgentExecutor for the judging agent that can handle both requests and responses.""" - - @handler(output_types=[AgentExecutorResponse]) - async def handle_guess_response( - self, response: AgentExecutorResponse, ctx: WorkflowContext - ) -> None: - """Handle response from the guesser and convert it to a request for the judge.""" - messages = response.agent_run_response.messages + if messages and "[COMPLETE]" in messages[-1].text.upper(): + await ctx.add_event( + WorkflowCompletedEvent( + data="Student-teacher conversation completed by teacher!" + ) + ) + return + request = AgentExecutorRequest(messages=messages, should_respond=True) await self.run(request, ctx) async def main(): - """Main function to run the workflow.""" - credential = AzureCliCredential() client = AIProjectClient( endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential ) - guess_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="GuessAgent" + student_agent = await client.agents.create_agent( + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="StudentAgent" ) - - judge_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="JudgeAgent" + teacher_agent = await client.agents.create_agent( + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="TeacherAgent" ) try: - # Step 1: Create agent-based executors with specific instructions - guess_number_executor = GuessAgentExecutor( + student_executor = StudentAgentExecutor( ChatClientAgent( - chat_client=FoundryChatClient(client=client, agent_id=guess_agent.id), - instructions="""You are a number guessing agent playing a guessing game. I need to find a number between 1 and 100. 
+ chat_client=FoundryChatClient(client=client, agent_id=student_agent.id), + instructions="""You are Jamie, a student. Your role is to answer the teacher's questions briefly and clearly. IMPORTANT RULES: - 1. NEVER repeat the same guess twice - 2. Use binary search strategy to be efficient - 3. Always respond with ONLY the number, nothing else - 4. Keep track of what you've learned from previous guesses - - BINARY SEARCH STRATEGY: - - Start with middle of current range - - If 'too low': the number is higher, so guess higher - - If 'too high': the number is lower, so guess lower - - Always eliminate half the remaining possibilities - - EXAMPLE SEQUENCE (target is 30): - Range 1-100: guess 50 → 'too high' → range becomes 1-49 - Range 1-49: guess 25 → 'too low' → range becomes 26-49 - Range 26-49: guess 37 → 'too high' → range becomes 26-36 - Range 26-36: guess 31 → 'too high' → range becomes 26-30 - Range 26-30: guess 28 → 'too low' → range becomes 29-30 - Range 29-30: guess 30 → 'correct' - - CRITICAL: If you just guessed 25 and got 'too high', your next guess must be LOWER than 25! - Think step by step about your range and pick the middle of the valid range.""", + 1. Answer questions directly and concisely + 2. Keep responses short (1-2 sentences maximum) + 3. Do NOT ask questions back""", ), - id="guesser", + id="student", ) - judge_number_executor = JudgeAgentExecutor( + teacher_executor = TeacherAgentExecutor( ChatClientAgent( - chat_client=FoundryChatClient(client=client, agent_id=judge_agent.id), - instructions="""You are a number judging agent. The secret number you're thinking of is 30. - Your job is to compare each guess to 30 and give feedback. 
- - RESPONSE RULES - respond with EXACTLY these phrases: - • If the guess is less than 30: say 'too low' - • If the guess is greater than 30: say 'too high' - • If the guess equals 30: say 'correct' - - EXAMPLES: - Guess 15 → '15 < 30' → respond 'too low' - Guess 25 → '25 < 30' → respond 'too low' - Guess 35 → '35 > 30' → respond 'too high' - Guess 45 → '45 > 30' → respond 'too high' - Guess 30 → '30 = 30' → respond 'correct' - - IMPORTANT: Only respond with the three exact phrases above. Nothing else.""", + chat_client=FoundryChatClient(client=client, agent_id=teacher_agent.id), + instructions="""You are Dr. Smith, a teacher. Your role is to ask the student different, simple questions to test their knowledge. + + IMPORTANT RULES: + 1. Ask ONE simple question at a time + 2. NEVER repeat the same question twice + 3. Ask DIFFERENT topics each time (science, math, history, geography, etc.) + 4. Keep questions short and clear + 5. Do NOT provide explanations - only ask questions""", ), - id="judge", + id="teacher", ) - # Step 2: Build the workflow with the defined edges. workflow = ( WorkflowBuilder() - .add_edge(guess_number_executor, judge_number_executor) - .add_edge(judge_number_executor, guess_number_executor) - .set_start_executor(guess_number_executor) + .add_edge(teacher_executor, student_executor) + .add_edge(student_executor, teacher_executor) + .set_start_executor(teacher_executor) .build() ) - # Step 3: Run the workflow and let agents decide when to complete. 
- async for event in workflow.run_streaming("start"): + async for event in workflow.run_streaming("Start the quiz session."): if isinstance(event, AgentRunEvent): - print(f"{event.executor_id}: {event.data}") + agent_name = event.executor_id + print(f"\n{agent_name}: {event.data}") elif isinstance(event, WorkflowCompletedEvent): - print(f"šŸŽ‰ {event.data}") + print(f"\nšŸŽ‰ {event.data}") break finally: - # Cleanup: Delete agents and close clients try: - if guess_agent: - await client.agents.delete_agent(guess_agent.id) - if judge_agent: - await client.agents.delete_agent(judge_agent.id) + if student_agent: + await client.agents.delete_agent(student_agent.id) + if teacher_agent: + await client.agents.delete_agent(teacher_agent.id) await client.close() await credential.close() except Exception: From d1597a03d31c3e9a8c8dee7c24240bc31de710cf Mon Sep 17 00:00:00 2001 From: Hui Miao Date: Tue, 9 Sep 2025 17:42:21 +0800 Subject: [PATCH 04/21] Update python workflow sample Refactor Student and Teacher agent executors to use ChatAgent and update tracing setup. 
--- samples/workflows/simple_workflow.py | 220 ++++++++++++++++++--------- 1 file changed, 149 insertions(+), 71 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index 0ad55df..86d4681 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -3,114 +3,193 @@ import asyncio import os -from agent_framework import ChatMessage, ChatRole -from agent_framework._agents import ChatClientAgent +from agent_framework import ChatAgent, ChatMessage, Role from agent_framework.workflow import ( - AgentRunEvent, + Executor, WorkflowBuilder, WorkflowCompletedEvent, -) -from agent_framework_foundry._chat_client import FoundryChatClient -from agent_framework_workflow._executor import ( - AgentExecutor, - AgentExecutorRequest, - AgentExecutorResponse, + WorkflowContext, handler, ) -from agent_framework_workflow._workflow_context import WorkflowContext +from agent_framework_foundry import FoundryChatClient from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import AzureCliCredential +from opentelemetry.sdk.resources import Resource +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.semconv.attributes import service_attributes +from opentelemetry.trace import set_tracer_provider + +try: + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter +except ImportError: + OTLPSpanExporter = None + + +# Load settings from environment variables +otlp_endpoint = f"http://localhost:{os.getenv('FOUNDRY_OTLP_PORT', '4317')}" + + +# Configure tracing to capture telemetry spans for visualization. 
+def set_up_tracing(): + if otlp_endpoint and OTLPSpanExporter: + exporter = OTLPSpanExporter(endpoint=otlp_endpoint) + resource = Resource.create( + {service_attributes.SERVICE_NAME: "StudentTeacherWorkflow"} + ) + tracer_provider = TracerProvider(resource=resource) + tracer_provider.add_span_processor(BatchSpanProcessor(exporter)) + set_tracer_provider(tracer_provider) + + +class StudentAgentExecutor(Executor): + """ + StudentAgentExecutor + + Executor that handles a "teacher question" event by re-invoking the agent with + the current conversation messages and requesting a response. + + Parameters (for the handler): + - response: AgentExecutorResponse containing the prior agent run result and messages. + - ctx: WorkflowContext[None] used to carry workflow-level state, cancellation, or metadata. + """ + agent: ChatAgent -class StudentAgentExecutor(AgentExecutor): - @handler(output_types=[AgentExecutorResponse]) + def __init__(self, agent: ChatAgent, id="student"): + super().__init__(agent=agent, id=id) + + @handler async def handle_teacher_question( - self, response: AgentExecutorResponse, ctx: WorkflowContext + self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]] ) -> None: - messages = response.agent_run_response.messages - request = AgentExecutorRequest(messages=messages, should_respond=True) - await self.run(request, ctx) + # wait 2 seconds to simulate "thinking" + await asyncio.sleep(2) + + response = await self.agent.run(messages) + # Extract just the text content from the last message + print(f"Student: {response.messages[-1].contents[-1].text}") + + messages.extend(response.messages) + await ctx.send_message(messages) + + +class TeacherAgentExecutor(Executor): + """ + TeacherAgentExecutor + Orchestrates the "teacher" side of the student-teacher workflow. 
-class TeacherAgentExecutor(AgentExecutor): - def __init__(self, agent, id="teacher"): - super().__init__(agent, id=id) - self.turn_count = 0 + - Start the conversation by sending the initial teacher prompt to the agent. + - Receive the student's responses, track the number of turns, and decide when to + end the workflow (either after a configured number of turns or when a completion + token is observed). + - Re-invoke the teacher agent to ask the next question when appropriate. + """ - @handler(output_types=[AgentExecutorResponse]) - async def handle_start_message(self, message: str, ctx: WorkflowContext) -> None: - chat_message = ChatMessage(ChatRole.USER, text=message) - request = AgentExecutorRequest(messages=[chat_message], should_respond=True) - await self.run(request, ctx) + turn_count: int = 0 + agent: ChatAgent - @handler(output_types=[AgentExecutorResponse]) + def __init__(self, agent: ChatAgent, id="teacher"): + super().__init__(agent=agent, id=id, turn_count=0) + + @handler + async def handle_start_message( + self, message: str, ctx: WorkflowContext[list[ChatMessage]] + ) -> None: + """ + Handle the initial start message for the teacher. + + The incoming message is treated as a user chat message sent to the teacher agent. + We wrap it in a ChatMessage and create an AgentExecutorRequest asking the agent + to respond. 
+ """ + # Build a user message for the teacher agent and request a response + chat_message = ChatMessage(Role.USER, text=message) + messages: list[ChatMessage] = [chat_message] + response = await self.agent.run(messages) + # Extract just the text content from the last message + print(f"Teacher: {response.messages[-1].contents[-1].text}") + + messages.extend(response.messages) + await ctx.send_message(messages) + + @handler async def handle_student_answer( - self, response: AgentExecutorResponse, ctx: WorkflowContext + self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]] ) -> None: + """ + Handle the student's answer (a list of ChatMessages). + + Behavior: + - Increment the turn counter each time the teacher processes a student's answer. + - If the turn limit is reached, emit a WorkflowCompletedEvent to end the workflow. + - Otherwise, forward the conversation messages back to the teacher agent and request + the next question. + """ + # wait 2 seconds to simulate "thinking" + await asyncio.sleep(2) self.turn_count += 1 + # End after 5 turns to avoid infinite conversation loops if self.turn_count >= 5: - await ctx.add_event( - WorkflowCompletedEvent( - data="Student-teacher conversation completed after 5 turns!" - ) - ) + await ctx.add_event(WorkflowCompletedEvent()) return - messages = response.agent_run_response.messages - if messages and "[COMPLETE]" in messages[-1].text.upper(): - await ctx.add_event( - WorkflowCompletedEvent( - data="Student-teacher conversation completed by teacher!" 
- ) - ) - return + # Otherwise, ask the teacher agent to produce the next question using the current messages + response = await self.agent.run(messages) + print(f"Teacher: {response.messages[-1].contents[-1].text}") - request = AgentExecutorRequest(messages=messages, should_respond=True) - await self.run(request, ctx) + messages.extend(response.messages) + await ctx.send_message(messages) async def main(): + set_up_tracing() + credential = AzureCliCredential() client = AIProjectClient( endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential ) + + # Create the Student and Teacher agents student_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="StudentAgent" + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], + name="StudentAgent", + instructions="""You are Jamie, a student. Your role is to answer the teacher's questions briefly and clearly. + + IMPORTANT RULES: + 1. Answer questions directly and concisely + 2. Keep responses short (1-2 sentences maximum) + 3. Do NOT ask questions back""", ) teacher_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], name="TeacherAgent" + model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], + name="TeacherAgent", + instructions="""You are Dr. Smith, a teacher. Your role is to ask the student different, simple questions to test their knowledge. + + IMPORTANT RULES: + 1. Ask ONE simple question at a time + 2. NEVER repeat the same question twice + 3. Ask DIFFERENT topics each time (science, math, history, geography, etc.) + 4. Keep questions short and clear + 5. Do NOT provide explanations - only ask questions""", ) try: student_executor = StudentAgentExecutor( - ChatClientAgent( - chat_client=FoundryChatClient(client=client, agent_id=student_agent.id), - instructions="""You are Jamie, a student. Your role is to answer the teacher's questions briefly and clearly. - - IMPORTANT RULES: - 1. 
Answer questions directly and concisely - 2. Keep responses short (1-2 sentences maximum) - 3. Do NOT ask questions back""", + ChatAgent( + chat_client=FoundryChatClient(client=client, agent_id=student_agent.id) ), - id="student", ) teacher_executor = TeacherAgentExecutor( - ChatClientAgent( - chat_client=FoundryChatClient(client=client, agent_id=teacher_agent.id), - instructions="""You are Dr. Smith, a teacher. Your role is to ask the student different, simple questions to test their knowledge. - - IMPORTANT RULES: - 1. Ask ONE simple question at a time - 2. NEVER repeat the same question twice - 3. Ask DIFFERENT topics each time (science, math, history, geography, etc.) - 4. Keep questions short and clear - 5. Do NOT provide explanations - only ask questions""", + ChatAgent( + chat_client=FoundryChatClient(client=client, agent_id=teacher_agent.id) ), - id="teacher", ) + # Define the workflow orchestration workflow = ( WorkflowBuilder() .add_edge(teacher_executor, student_executor) @@ -119,16 +198,15 @@ async def main(): .build() ) - async for event in workflow.run_streaming("Start the quiz session."): - if isinstance(event, AgentRunEvent): - agent_name = event.executor_id - print(f"\n{agent_name}: {event.data}") - elif isinstance(event, WorkflowCompletedEvent): - print(f"\nšŸŽ‰ {event.data}") - break + async for event in workflow.run_stream("Start the quiz session."): + if isinstance(event, WorkflowCompletedEvent): + print(f"\nšŸŽ‰ Student-teacher conversation completed after 5 turns!") + except Exception as e: + print(f"Error running workflow: {e}") finally: try: + # Clean up the agents if student_agent: await client.agents.delete_agent(student_agent.id) if teacher_agent: From 86ac02e26d114526e675230a941328edad9d8514 Mon Sep 17 00:00:00 2001 From: Hui Miao Date: Sun, 28 Sep 2025 14:58:50 +0800 Subject: [PATCH 05/21] Refactor student-teacher workflow implementation Refactor the student-teacher workflow by updating imports, adding dataclass for responses, and 
modifying agent execution logic. --- samples/workflows/simple_workflow.py | 276 +++++++++++---------------- 1 file changed, 115 insertions(+), 161 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index 86d4681..82e12e9 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -1,59 +1,52 @@ -# Copyright (c) Microsoft. All rights reserved. - import asyncio import os - -from agent_framework import ChatAgent, ChatMessage, Role -from agent_framework.workflow import ( +from dataclasses import dataclass +from uuid import uuid4 + +from agent_framework import ( + AgentRunResponseUpdate, + AgentRunUpdateEvent, + ChatAgent, + ChatMessage, Executor, + Role, + TextContent, WorkflowBuilder, - WorkflowCompletedEvent, WorkflowContext, handler, ) -from agent_framework_foundry import FoundryChatClient -from azure.ai.projects.aio import AIProjectClient -from azure.identity.aio import AzureCliCredential -from opentelemetry.sdk.resources import Resource -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor -from opentelemetry.semconv.attributes import service_attributes -from opentelemetry.trace import set_tracer_provider - -try: - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter -except ImportError: - OTLPSpanExporter = None - - -# Load settings from environment variables -otlp_endpoint = f"http://localhost:{os.getenv('FOUNDRY_OTLP_PORT', '4317')}" - - -# Configure tracing to capture telemetry spans for visualization. 
-def set_up_tracing(): - if otlp_endpoint and OTLPSpanExporter: - exporter = OTLPSpanExporter(endpoint=otlp_endpoint) - resource = Resource.create( - {service_attributes.SERVICE_NAME: "StudentTeacherWorkflow"} - ) - tracer_provider = TracerProvider(resource=resource) - tracer_provider.add_span_processor(BatchSpanProcessor(exporter)) - set_tracer_provider(tracer_provider) +from agent_framework.azure import AzureOpenAIChatClient +from agent_framework.observability import setup_observability +from dotenv import load_dotenv +load_dotenv() + +# ============================================================================= +# USER CONFIGURATION - SET THESE AS ENVIRONMENT VARIABLES +# ============================================================================= +AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") +AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY") +MODEL_DEPLOYMENT_NAME = os.getenv("MODEL_DEPLOYMENT_NAME") + +# ============================================================================= -class StudentAgentExecutor(Executor): - """ - StudentAgentExecutor - Executor that handles a "teacher question" event by re-invoking the agent with - the current conversation messages and requesting a response. +@dataclass +class StudentResponse: + messages: list[ChatMessage] - Parameters (for the handler): - - response: AgentExecutorResponse containing the prior agent run result and messages. - - ctx: WorkflowContext[None] used to carry workflow-level state, cancellation, or metadata. 
- """ +def create_openai_chat_client(): + """Create OpenAI chat client with explicit settings.""" + + return AzureOpenAIChatClient( + api_key=AZURE_OPENAI_API_KEY, + deployment_name=MODEL_DEPLOYMENT_NAME, + endpoint=AZURE_OPENAI_ENDPOINT, + ) + + +class StudentAgentExecutor(Executor): agent: ChatAgent def __init__(self, agent: ChatAgent, id="student"): @@ -61,160 +54,121 @@ def __init__(self, agent: ChatAgent, id="student"): @handler async def handle_teacher_question( - self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]] + self, messages: list[ChatMessage], ctx: WorkflowContext[StudentResponse] ) -> None: - # wait 2 seconds to simulate "thinking" - await asyncio.sleep(2) + if messages and "completed" in messages[-1].contents[-1].text.lower(): + await ctx.yield_output( + "šŸŽ‰ Student-teacher conversation completed after 2 turns!" + ) + return response = await self.agent.run(messages) - # Extract just the text content from the last message print(f"Student: {response.messages[-1].contents[-1].text}") + for message in response.messages: + if message.role == Role.ASSISTANT: + await ctx.add_event( + AgentRunUpdateEvent( + self.id, + data=AgentRunResponseUpdate( + contents=[TextContent(text=f"Student: {message.contents[-1].text}")], + role=Role.ASSISTANT, + response_id=str(uuid4()), + ), + ) + ) + messages.extend(response.messages) - await ctx.send_message(messages) + await ctx.send_message(StudentResponse(messages=messages)) class TeacherAgentExecutor(Executor): - """ - TeacherAgentExecutor - - Orchestrates the "teacher" side of the student-teacher workflow. - - - Start the conversation by sending the initial teacher prompt to the agent. - - Receive the student's responses, track the number of turns, and decide when to - end the workflow (either after a configured number of turns or when a completion - token is observed). - - Re-invoke the teacher agent to ask the next question when appropriate. 
- """ - - turn_count: int = 0 agent: ChatAgent def __init__(self, agent: ChatAgent, id="teacher"): - super().__init__(agent=agent, id=id, turn_count=0) + super().__init__(agent=agent, id=id) - @handler - async def handle_start_message( - self, message: str, ctx: WorkflowContext[list[ChatMessage]] - ) -> None: - """ - Handle the initial start message for the teacher. - - The incoming message is treated as a user chat message sent to the teacher agent. - We wrap it in a ChatMessage and create an AgentExecutorRequest asking the agent - to respond. - """ - # Build a user message for the teacher agent and request a response - chat_message = ChatMessage(Role.USER, text=message) - messages: list[ChatMessage] = [chat_message] - response = await self.agent.run(messages) - # Extract just the text content from the last message + async def _handle_response(self, messages, ctx, response): print(f"Teacher: {response.messages[-1].contents[-1].text}") - + for message in response.messages: + if message.role == Role.ASSISTANT: + await ctx.add_event( + AgentRunUpdateEvent( + self.id, + data=AgentRunResponseUpdate( + contents=[TextContent(text=f"Teacher: {message.contents[-1].text}")], + role=Role.ASSISTANT, + response_id=str(uuid4()), + ), + ) + ) messages.extend(response.messages) await ctx.send_message(messages) @handler - async def handle_student_answer( + async def handle_user_message( self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]] ) -> None: - """ - Handle the student's answer (a list of ChatMessages). - - Behavior: - - Increment the turn counter each time the teacher processes a student's answer. - - If the turn limit is reached, emit a WorkflowCompletedEvent to end the workflow. - - Otherwise, forward the conversation messages back to the teacher agent and request - the next question. 
- """ - # wait 2 seconds to simulate "thinking" - await asyncio.sleep(2) - self.turn_count += 1 - - # End after 5 turns to avoid infinite conversation loops - if self.turn_count >= 5: - await ctx.add_event(WorkflowCompletedEvent()) - return + response = await self.agent.run(messages) + await self._handle_response(messages, ctx, response) - # Otherwise, ask the teacher agent to produce the next question using the current messages + @handler + async def handle_student_answer( + self, student_response: StudentResponse, ctx: WorkflowContext[list[ChatMessage]] + ) -> None: + messages = student_response.messages response = await self.agent.run(messages) - print(f"Teacher: {response.messages[-1].contents[-1].text}") + await self._handle_response(messages, ctx, response) - messages.extend(response.messages) - await ctx.send_message(messages) +def create_workflow_from_client(): + """Create workflow using OpenAI chat client with explicit settings.""" -async def main(): - set_up_tracing() + # Create OpenAI chat client + chat_client = create_openai_chat_client() - credential = AzureCliCredential() - client = AIProjectClient( - endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], credential=credential + # Create student agent + student_agent = chat_client.create_agent( + instructions="You are Jamie, a student. Answer teacher questions briefly (1-2 sentences). Don't ask questions back." ) - # Create the Student and Teacher agents - student_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], - name="StudentAgent", - instructions="""You are Jamie, a student. Your role is to answer the teacher's questions briefly and clearly. - - IMPORTANT RULES: - 1. Answer questions directly and concisely - 2. Keep responses short (1-2 sentences maximum) - 3. Do NOT ask questions back""", + # Create teacher agent + teacher_agent = chat_client.create_agent( + instructions="You are Dr. Smith, a teacher. 
Ask simple questions on different topics without numbering or formatting. Just ask the question directly. After 2 question-answer exchanges, respond with only 'Completed'. Keep questions short and clear." ) - teacher_agent = await client.agents.create_agent( - model=os.environ["FOUNDRY_MODEL_DEPLOYMENT_NAME"], - name="TeacherAgent", - instructions="""You are Dr. Smith, a teacher. Your role is to ask the student different, simple questions to test their knowledge. - - IMPORTANT RULES: - 1. Ask ONE simple question at a time - 2. NEVER repeat the same question twice - 3. Ask DIFFERENT topics each time (science, math, history, geography, etc.) - 4. Keep questions short and clear - 5. Do NOT provide explanations - only ask questions""", + + # Create executors + student_executor = StudentAgentExecutor(student_agent) + teacher_executor = TeacherAgentExecutor(teacher_agent) + + workflow = ( + WorkflowBuilder() + .add_edge(teacher_executor, student_executor) + .add_edge(student_executor, teacher_executor) + .set_start_executor(teacher_executor) + .build() ) - try: - student_executor = StudentAgentExecutor( - ChatAgent( - chat_client=FoundryChatClient(client=client, agent_id=student_agent.id) - ), - ) + return workflow - teacher_executor = TeacherAgentExecutor( - ChatAgent( - chat_client=FoundryChatClient(client=client, agent_id=teacher_agent.id) - ), - ) - # Define the workflow orchestration - workflow = ( - WorkflowBuilder() - .add_edge(teacher_executor, student_executor) - .add_edge(student_executor, teacher_executor) - .set_start_executor(teacher_executor) - .build() - ) +async def main(): + """Main function to run the student-teacher workflow.""" - async for event in workflow.run_stream("Start the quiz session."): - if isinstance(event, WorkflowCompletedEvent): - print(f"\nšŸŽ‰ Student-teacher conversation completed after 5 turns!") + # Configure observability for workflow visualization + setup_observability(vs_code_extension_port=4317) + + try: + workflow = 
create_workflow_from_client() + message = ChatMessage( + role=Role.USER, contents=[TextContent("Start the quiz session.")] + ) + async for _ in workflow.run_stream([message]): + pass except Exception as e: print(f"Error running workflow: {e}") - finally: - try: - # Clean up the agents - if student_agent: - await client.agents.delete_agent(student_agent.id) - if teacher_agent: - await client.agents.delete_agent(teacher_agent.id) - await client.close() - await credential.close() - except Exception: - pass + raise if __name__ == "__main__": From 00e13fa1aee2193d1619f2a2da0fb6756d888069 Mon Sep 17 00:00:00 2001 From: Hui Miao Date: Sun, 28 Sep 2025 14:59:12 +0800 Subject: [PATCH 06/21] Remove dotenv loading from simple_workflow.py Removed dotenv loading from simple_workflow.py. --- samples/workflows/simple_workflow.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index 82e12e9..2f3f50b 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -17,9 +17,6 @@ ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.observability import setup_observability -from dotenv import load_dotenv - -load_dotenv() # ============================================================================= # USER CONFIGURATION - SET THESE AS ENVIRONMENT VARIABLES From 7d106e396247cffb2244bdef45bba4d5c3f99273 Mon Sep 17 00:00:00 2001 From: Hui Miao Date: Fri, 10 Oct 2025 15:01:00 +0800 Subject: [PATCH 07/21] Refine agent instructions and observability port Updated instructions for student and teacher agents to clarify response behavior and question format. 
--- samples/workflows/simple_workflow.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py index 2f3f50b..d111849 100644 --- a/samples/workflows/simple_workflow.py +++ b/samples/workflows/simple_workflow.py @@ -17,6 +17,9 @@ ) from agent_framework.azure import AzureOpenAIChatClient from agent_framework.observability import setup_observability +from dotenv import load_dotenv + +load_dotenv() # ============================================================================= # USER CONFIGURATION - SET THESE AS ENVIRONMENT VARIABLES @@ -48,6 +51,7 @@ class StudentAgentExecutor(Executor): def __init__(self, agent: ChatAgent, id="student"): super().__init__(agent=agent, id=id) + self.agent = agent @handler async def handle_teacher_question( @@ -84,6 +88,7 @@ class TeacherAgentExecutor(Executor): def __init__(self, agent: ChatAgent, id="teacher"): super().__init__(agent=agent, id=id) + self.agent = agent async def _handle_response(self, messages, ctx, response): print(f"Teacher: {response.messages[-1].contents[-1].text}") @@ -126,12 +131,12 @@ def create_workflow_from_client(): # Create student agent student_agent = chat_client.create_agent( - instructions="You are Jamie, a student. Answer teacher questions briefly (1-2 sentences). Don't ask questions back." + instructions="""You are Jamie, a student. Only answer the teacher's questions. For each teacher question, reply with a concise factual answer in one sentence (1-2 sentences max). Do not ask questions, do not add commentary or feedback, and do not provide extra information beyond the direct answer. If the teacher says 'Completed', stop and do not respond further. Keep answers short and precise.""" ) # Create teacher agent teacher_agent = chat_client.create_agent( - instructions="You are Dr. Smith, a teacher. Ask simple questions on different topics without numbering or formatting. Just ask the question directly. 
After 2 question-answer exchanges, respond with only 'Completed'. Keep questions short and clear." + instructions="""You are Dr. Smith, a teacher. Follow this exact pattern: Ask exactly two direct questions total, one at a time. Each question must come from a different academic subject (for example: Geography, Science, History, Math, Literature). Start immediately by asking Question 1 — do NOT ask what topic the student wants or present a list of options. After the student answers Question 1, ask Question 2 from a different subject. After the student answers Question 2, reply with only the single word Completed (capital C, no punctuation, nothing else). Do not give feedback, corrections, explanations, or ask follow-up questions. Keep each question short and direct (ideally 3-8 words).""" ) # Create executors @@ -153,7 +158,7 @@ async def main(): """Main function to run the student-teacher workflow.""" # Configure observability for workflow visualization - setup_observability(vs_code_extension_port=4317) + setup_observability(vs_code_extension_port=4319) try: workflow = create_workflow_from_client() From fd082b70cb7e920ecdfb90fd32a19d3c9370b4ea Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:15:50 +0800 Subject: [PATCH 08/21] feat: migrating hosted agent samples --- .github/workflows/package-hosted-agents.yml | 48 +++++ .../hosted-agent/dotnet/agent/.dockerignore | 57 +++++ samples/hosted-agent/dotnet/agent/.env.tpl | 4 + .../hosted-agent/dotnet/agent/Dockerfile.tpl | 20 ++ .../hosted-agent/dotnet/agent/Program.cs.tpl | 136 ++++++++++++ samples/hosted-agent/dotnet/agent/README.md | 166 ++++++++++++++ .../hosted-agent/dotnet/agent/agent.yaml.tpl | 25 +++ .../agent/appsettings.Development.json.tpl | 6 + .../dotnet/agent/run-requests.http | 52 +++++ .../dotnet/agent/{{SafeProjectName}}.csproj | 16 ++ .../hosted-agent/dotnet/minimal/.dockerignore | 51 +++++ .../hosted-agent/dotnet/minimal/Dockerfile | 21 ++ .../dotnet/workflow/.dockerignore | 57 
+++++ samples/hosted-agent/dotnet/workflow/.env.tpl | 4 + .../dotnet/workflow/Dockerfile.tpl | 22 ++ .../dotnet/workflow/Program.cs.tpl | 166 ++++++++++++++ .../hosted-agent/dotnet/workflow/README.md | 169 +++++++++++++++ .../dotnet/workflow/agent.yaml.tpl | 24 +++ .../workflow/appsettings.Development.json.tpl | 4 + .../workflow/{{SafeProjectName}}.csproj | 30 +++ .../hosted-agent/python/agent/.dockerignore | 66 ++++++ samples/hosted-agent/python/agent/.env.tpl | 4 + .../python/agent/.vscode/launch.json | 17 ++ .../python/agent/.vscode/tasks.json | 70 ++++++ samples/hosted-agent/python/agent/Dockerfile | 15 ++ samples/hosted-agent/python/agent/README.md | 195 +++++++++++++++++ .../hosted-agent/python/agent/agent.yaml.tpl | 25 +++ samples/hosted-agent/python/agent/main.py | 149 +++++++++++++ .../python/agent/requirements.txt | 4 + .../hosted-agent/python/minimal/.dockerignore | 51 +++++ .../hosted-agent/python/minimal/Dockerfile | 15 ++ .../python/workflow/.dockerignore | 51 +++++ samples/hosted-agent/python/workflow/.env.tpl | 4 + .../python/workflow/.vscode/launch.json | 17 ++ .../python/workflow/.vscode/tasks.json | 70 ++++++ .../hosted-agent/python/workflow/Dockerfile | 15 ++ .../hosted-agent/python/workflow/README.md | 204 ++++++++++++++++++ .../python/workflow/agent.yaml.tpl | 24 +++ samples/hosted-agent/python/workflow/main.py | 107 +++++++++ .../python/workflow/requirements.txt | 4 + samples/hosted-agent/version-manifest.json | 21 ++ samples/workflows/simple_workflow.py | 177 --------------- 42 files changed, 2206 insertions(+), 177 deletions(-) create mode 100644 .github/workflows/package-hosted-agents.yml create mode 100644 samples/hosted-agent/dotnet/agent/.dockerignore create mode 100644 samples/hosted-agent/dotnet/agent/.env.tpl create mode 100644 samples/hosted-agent/dotnet/agent/Dockerfile.tpl create mode 100644 samples/hosted-agent/dotnet/agent/Program.cs.tpl create mode 100644 samples/hosted-agent/dotnet/agent/README.md create mode 100644 
samples/hosted-agent/dotnet/agent/agent.yaml.tpl create mode 100644 samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl create mode 100644 samples/hosted-agent/dotnet/agent/run-requests.http create mode 100644 samples/hosted-agent/dotnet/agent/{{SafeProjectName}}.csproj create mode 100644 samples/hosted-agent/dotnet/minimal/.dockerignore create mode 100644 samples/hosted-agent/dotnet/minimal/Dockerfile create mode 100644 samples/hosted-agent/dotnet/workflow/.dockerignore create mode 100644 samples/hosted-agent/dotnet/workflow/.env.tpl create mode 100644 samples/hosted-agent/dotnet/workflow/Dockerfile.tpl create mode 100644 samples/hosted-agent/dotnet/workflow/Program.cs.tpl create mode 100644 samples/hosted-agent/dotnet/workflow/README.md create mode 100644 samples/hosted-agent/dotnet/workflow/agent.yaml.tpl create mode 100644 samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl create mode 100644 samples/hosted-agent/dotnet/workflow/{{SafeProjectName}}.csproj create mode 100644 samples/hosted-agent/python/agent/.dockerignore create mode 100644 samples/hosted-agent/python/agent/.env.tpl create mode 100644 samples/hosted-agent/python/agent/.vscode/launch.json create mode 100644 samples/hosted-agent/python/agent/.vscode/tasks.json create mode 100644 samples/hosted-agent/python/agent/Dockerfile create mode 100644 samples/hosted-agent/python/agent/README.md create mode 100644 samples/hosted-agent/python/agent/agent.yaml.tpl create mode 100644 samples/hosted-agent/python/agent/main.py create mode 100644 samples/hosted-agent/python/agent/requirements.txt create mode 100644 samples/hosted-agent/python/minimal/.dockerignore create mode 100644 samples/hosted-agent/python/minimal/Dockerfile create mode 100644 samples/hosted-agent/python/workflow/.dockerignore create mode 100644 samples/hosted-agent/python/workflow/.env.tpl create mode 100644 samples/hosted-agent/python/workflow/.vscode/launch.json create mode 100644 
samples/hosted-agent/python/workflow/.vscode/tasks.json create mode 100644 samples/hosted-agent/python/workflow/Dockerfile create mode 100644 samples/hosted-agent/python/workflow/README.md create mode 100644 samples/hosted-agent/python/workflow/agent.yaml.tpl create mode 100644 samples/hosted-agent/python/workflow/main.py create mode 100644 samples/hosted-agent/python/workflow/requirements.txt create mode 100644 samples/hosted-agent/version-manifest.json delete mode 100644 samples/workflows/simple_workflow.py diff --git a/.github/workflows/package-hosted-agents.yml b/.github/workflows/package-hosted-agents.yml new file mode 100644 index 0000000..9ec0bb8 --- /dev/null +++ b/.github/workflows/package-hosted-agents.yml @@ -0,0 +1,48 @@ +name: Package Hosted Agents + +on: + push: + paths: + - "samples/hosted-agent/**" + branches: + - hui/workflow-samples + - main + +permissions: + contents: write + +jobs: + package-agents: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Create dotnet agent package + run: | + cd samples/hosted-agent + zip -r dotnet-hosted-agents.zip dotnet/ + ls -lh dotnet-hosted-agents.zip + + - name: Create python agent package + run: | + cd samples/hosted-agent + zip -r python-hosted-agents.zip python/ + ls -lh python-hosted-agents.zip + + - name: Upload packages to Release + uses: softprops/action-gh-release@v1 + with: + files: | + samples/hosted-agent/dotnet-hosted-agents.zip + samples/hosted-agent/python-hosted-agents.zip + generate_release_notes: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload packages as artifacts + uses: actions/upload-artifact@v3 + with: + name: hosted-agents-packages + path: samples/hosted-agent/*-hosted-agents.zip + retention-days: 30 diff --git a/samples/hosted-agent/dotnet/agent/.dockerignore b/samples/hosted-agent/dotnet/agent/.dockerignore new file mode 100644 index 0000000..6bfa65a --- /dev/null +++ 
b/samples/hosted-agent/dotnet/agent/.dockerignore @@ -0,0 +1,57 @@ +# Build outputs +bin/ +obj/ +out/ + +# IDE and editor files +.vs/ +.vscode/ +*.user +*.suo +*.sln.docstates +.foundry/ + +# Git +.git/ +.gitignore + +# Documentation and samples (not needed in container) +*.md +*.http + +# Ignore files +.dockerignore + +# Logs +*.log + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db + +# Package manager directories +node_modules/ +packages/ + +# Test results +TestResults/ +*.trx + +# Coverage reports +coverage/ +*.coverage +*.coveragexml + +# Environment files with secrets +.env +.env.* +*.local +appsettings.*.json +!appsettings.json + +.venv/ +__pycache__/ diff --git a/samples/hosted-agent/dotnet/agent/.env.tpl b/samples/hosted-agent/dotnet/agent/.env.tpl new file mode 100644 index 0000000..32b9dee --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/.env.tpl @@ -0,0 +1,4 @@ +Azure__ProjectEndpoint="{{{AzureAIProjectEndpoint}}}" + +# Replace with your model deployment name, by default using gpt-4.1-mini +Azure__ModelDeploymentName=gpt-4.1-mini diff --git a/samples/hosted-agent/dotnet/agent/Dockerfile.tpl b/samples/hosted-agent/dotnet/agent/Dockerfile.tpl new file mode 100644 index 0000000..6a9cd12 --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/Dockerfile.tpl @@ -0,0 +1,20 @@ +# Build the application +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src + +# Copy files from the current directory on the host to the working directory in the container +COPY . . + +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app + +# Run the application +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app + +# Copy everything needed to run the app from the "build" stage. +COPY --from=build /app . 
+ +EXPOSE 8088 +ENTRYPOINT ["dotnet", "{{SafeProjectName}}.dll"] diff --git a/samples/hosted-agent/dotnet/agent/Program.cs.tpl b/samples/hosted-agent/dotnet/agent/Program.cs.tpl new file mode 100644 index 0000000..5b28596 --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/Program.cs.tpl @@ -0,0 +1,136 @@ +// Seattle Hotel Agent - A simple agent with a tool to find hotels in Seattle. +// Uses Microsoft Agent Framework with Azure AI Foundry. +// Ready for deployment to Foundry Hosted Agent service. + +using System.ComponentModel; +using System.Globalization; +using System.Text; +using System.ClientModel.Primitives; +using Azure.AI.AgentServer.AgentFramework.Extensions; +using Azure.AI.OpenAI; +using Azure.AI.Projects; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Extensions.AI; + +// Get configuration from environment variables +var endpoint = Environment.GetEnvironmentVariable("AZURE_AI_PROJECT_ENDPOINT") + ?? throw new InvalidOperationException("AZURE_AI_PROJECT_ENDPOINT is not set."); +var deploymentName = Environment.GetEnvironmentVariable("MODEL_DEPLOYMENT_NAME") ?? "gpt-4.1-mini"; +Console.WriteLine($"Project Endpoint: {endpoint}"); +Console.WriteLine($"Model Deployment: {deploymentName}"); +// Simulated hotel data for Seattle +var seattleHotels = new[] +{ + new Hotel("Contoso Suites", 189, 4.5, "Downtown"), + new Hotel("Fabrikam Residences", 159, 4.2, "Pike Place Market"), + new Hotel("Alpine Ski House", 249, 4.7, "Seattle Center"), + new Hotel("Margie's Travel Lodge", 219, 4.4, "Waterfront"), + new Hotel("Northwind Inn", 139, 4.0, "Capitol Hill"), + new Hotel("Relecloud Hotel", 99, 3.8, "University District"), +}; + +[Description("Get available hotels in Seattle for the specified dates. 
This simulates a call to a hotel availability API.")] +string GetAvailableHotels( + [Description("Check-in date in YYYY-MM-DD format")] string checkInDate, + [Description("Check-out date in YYYY-MM-DD format")] string checkOutDate, + [Description("Maximum price per night in USD (optional, defaults to 500)")] int maxPrice = 500) +{ + try + { + // Parse dates + if (!DateTime.TryParseExact(checkInDate, "yyyy-MM-dd", CultureInfo.InvariantCulture, DateTimeStyles.None, out var checkIn)) + { + return $"Error parsing check-in date. Please use YYYY-MM-DD format."; + } + + if (!DateTime.TryParseExact(checkOutDate, "yyyy-MM-dd", CultureInfo.InvariantCulture, DateTimeStyles.None, out var checkOut)) + { + return $"Error parsing check-out date. Please use YYYY-MM-DD format."; + } + + // Validate dates + if (checkOut <= checkIn) + { + return "Error: Check-out date must be after check-in date."; + } + + var nights = (checkOut - checkIn).Days; + + // Filter hotels by price + var availableHotels = seattleHotels.Where(h => h.PricePerNight <= maxPrice).ToList(); + + if (availableHotels.Count == 0) + { + return $"No hotels found in Seattle within your budget of ${maxPrice}/night."; + } + + // Build response + var result = new StringBuilder(); + result.AppendLine($"Available hotels in Seattle from {checkInDate} to {checkOutDate} ({nights} nights):"); + result.AppendLine(); + + foreach (var hotel in availableHotels) + { + var totalCost = hotel.PricePerNight * nights; + result.AppendLine($"**{hotel.Name}**"); + result.AppendLine($" Location: {hotel.Location}"); + result.AppendLine($" Rating: {hotel.Rating}/5"); + result.AppendLine($" ${hotel.PricePerNight}/night (Total: ${totalCost})"); + result.AppendLine(); + } + + return result.ToString(); + } + catch (Exception ex) + { + return $"Error processing request. 
Details: {ex.Message}"; + } +} + +// Create chat client using AIProjectClient to get the OpenAI connection from the project +var credential = new DefaultAzureCredential(); +AIProjectClient projectClient = new AIProjectClient(new Uri(endpoint), credential); + +// Get the OpenAI connection from the project +ClientConnection connection = projectClient.GetConnection(typeof(AzureOpenAIClient).FullName!); + +if (!connection.TryGetLocatorAsUri(out Uri? openAiEndpoint) || openAiEndpoint is null) +{ + throw new InvalidOperationException("Failed to get OpenAI endpoint from project connection."); +} +openAiEndpoint = new Uri($"https://{openAiEndpoint.Host}"); +Console.WriteLine($"OpenAI Endpoint: {openAiEndpoint}"); + +var chatClient = new AzureOpenAIClient(openAiEndpoint, credential) + .GetChatClient(deploymentName) + .AsIChatClient() + .AsBuilder() + .UseOpenTelemetry(sourceName: "Agents", configure: cfg => cfg.EnableSensitiveData = false) + .Build(); + +var agent = new ChatClientAgent(chatClient, + name: "SeattleHotelAgent", + instructions: """ + You are a helpful travel assistant specializing in finding hotels in Seattle, Washington. + + When a user asks about hotels in Seattle: + 1. Ask for their check-in and check-out dates if not provided + 2. Ask about their budget preferences if not mentioned + 3. Use the GetAvailableHotels tool to find available options + 4. Present the results in a friendly, informative way + 5. Offer to help with additional questions about the hotels or Seattle + + Be conversational and helpful. If users ask about things outside of Seattle hotels, + politely let them know you specialize in Seattle hotel recommendations. 
+ """, + tools: [AIFunctionFactory.Create(GetAvailableHotels)]) + .AsBuilder() + .UseOpenTelemetry(sourceName: "Agents", configure: cfg => cfg.EnableSensitiveData = false) + .Build(); + +Console.WriteLine("Seattle Hotel Agent Server running on http://localhost:8088"); +await agent.RunAIAgentAsync(telemetrySourceName: "Agents"); + +// Hotel record for simulated data +record Hotel(string Name, int PricePerNight, double Rating, string Location); diff --git a/samples/hosted-agent/dotnet/agent/README.md b/samples/hosted-agent/dotnet/agent/README.md new file mode 100644 index 0000000..4039a84 --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/README.md @@ -0,0 +1,166 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. 
+ +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + +# What this sample demonstrates + +This sample demonstrates a **key advantage of code-based hosted agents**: + +- **Local C# tool execution** - Run custom C# methods as agent tools + +Code-based agents can execute **any C# code** you write. This sample includes a Seattle Hotel Agent with a `GetAvailableHotels` tool that searches for available hotels based on check-in/check-out dates and budget preferences. + +The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme) and can be deployed to Microsoft Foundry. + +## How It Works + +### Local Tools Integration + +In [Program.cs](Program.cs), the agent uses a local C# method (`GetAvailableHotels`) that simulates a hotel availability API. This demonstrates how code-based agents can execute custom server-side logic that prompt agents cannot access. + +The tool accepts: + +- **checkInDate** - Check-in date in YYYY-MM-DD format +- **checkOutDate** - Check-out date in YYYY-MM-DD format +- **maxPrice** - Maximum price per night in USD (optional, defaults to $500) + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer SDK](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/ai.agentserver.agentframework-readme), +which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure AI Foundry Project** + - Project created. + - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`) + - Note your project endpoint URL and model deployment name + +2. **Azure CLI** + - Installed and authenticated + - Run `az login` and verify with `az account show` + +3. 
**.NET 10.0 SDK or later** + - Verify your version: `dotnet --version` + - Download from [https://dotnet.microsoft.com/download](https://dotnet.microsoft.com/download) + +### Environment Variables + +Set the following environment variables (matching `agent.yaml`): + +- `AZURE_AI_PROJECT_ENDPOINT` - Your Azure AI Foundry project endpoint URL (required) +- `MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (defaults to `gpt-4.1-mini`) + +**PowerShell:** + +```powershell +# Replace with your actual values +$env:AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +**Bash:** + +```bash +export AZURE_AI_PROJECT_ENDPOINT="https://.services.ai.azure.com/api/projects/" +export MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +### Running the Sample + +To run the agent, execute the following command in your terminal: + +```bash +dotnet restore +dotnet build +dotnet run +``` + +This will start the hosted agent locally on `http://localhost:8088/`. + +### Interacting with the Agent + +**VS Code:** + +1. Open the Visual Studio Code Command Palette and execute the `Microsoft Foundry: Open Container Agent Playground Locally` command. +2. Execute the following commands to start the containerized hosted agent. + + ```bash + dotnet restore + dotnet build + dotnet run + ``` + +3. Submit a request to the agent through the playground interface. For example, you may enter a prompt such as: "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under $200 per night." +4. The agent will use the GetAvailableHotels tool to search for available hotels matching your criteria. + +> **Note**: Open the local playground before starting the container agent to ensure the visualization functions correctly. 
+ +**PowerShell (Windows):** + +```powershell +$body = @{ + input = "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under `$200 per night" + stream = $false +} | ConvertTo-Json + +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" +``` + +**Bash/curl (Linux/macOS):** + +```bash +curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ + -d '{"input": "Find me hotels in Seattle for March 20-23, 2025 under $200 per night","stream":false}' +``` + +You can also use the `run-requests.http` file in this directory with the VS Code REST Client extension. + +The agent will use the `GetAvailableHotels` tool to search for available hotels matching your criteria. + +## Deploying the Agent to Microsoft Foundry + +**Preparation (required)** + +Please check the environment_variables section in [agent.yaml](agent.yaml) and ensure the variables there are set in your target Microsoft Foundry Project. + +To deploy the hosted agent: + +1. Open the VS Code Command Palette and run the `Microsoft Foundry: Deploy Hosted Agent` command. +2. Follow the interactive deployment prompts. The extension will help you select or create the container files it needs. +3. After deployment completes, the hosted agent appears under the `Hosted Agents (Preview)` section of the extension tree. You can select the agent there to view details and test it using the integrated playground. + +**What the deploy flow does for you:** + +- Creates or obtains an Azure Container Registry for the target project. +- Builds and pushes a container image from your workspace (the build packages the workspace respecting `.dockerignore`). +- Creates an agent version in Microsoft Foundry using the built image. 
If a `.env` file exists at the workspace root, the extension will parse it and include its key/value pairs as the hosted agent's environment variables in the create request (these variables will be available to the agent runtime). +- Starts the agent container on the project's capability host. If the capability host is not provisioned, the extension will prompt you to enable it and will guide you through creating it. + +## MSI Configuration in the Azure Portal + +This sample requires the Microsoft Foundry Project to authenticate using a Managed Identity when running remotely in Azure. Grant the project's managed identity the required permissions by assigning the built-in [Azure AI User](https://aka.ms/foundry-ext-project-role) role. + +To configure the Managed Identity: + +1. In the Azure Portal, open the Foundry Project. +2. Select "Access control (IAM)" from the left-hand menu. +3. Click "Add" and choose "Add role assignment". +4. In the role selection, search for and select "Azure AI User", then click "Next". +5. For "Assign access to", choose "Managed identity". +6. Click "Select members", locate the managed identity associated with your Foundry Project (you can search by the project name), then click "Select". +7. Click "Review + assign" to complete the assignment. +8. Allow a few minutes for the role assignment to propagate before running the application. 
+ +## Additional Resources + +- [Microsoft Agents Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview) +- [Managed Identities for Azure Resources](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/) diff --git a/samples/hosted-agent/dotnet/agent/agent.yaml.tpl b/samples/hosted-agent/dotnet/agent/agent.yaml.tpl new file mode 100644 index 0000000..014464d --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/agent.yaml.tpl @@ -0,0 +1,25 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: {{AgentName}} +description: > + A travel assistant agent that helps users find hotels in Seattle. + Demonstrates local C# tool execution - a key advantage of code-based + hosted agents over prompt agents. +metadata: + authors: + - Microsoft + tags: + - Azure AI AgentServer + - Microsoft Agent Framework + - Local Tools + - Travel Assistant + - Hotel Search +protocols: + - protocol: responses + version: v1 +environment_variables: + - name: PROJECT_ENDPOINT + value: {{=<% %>=}}"{{AZURE_AI_PROJECT_ENDPOINT}}"<%={{ }}=%> + - name: MODEL_DEPLOYMENT_NAME + value: {{=<% %>=}}"{{MODEL_DEPLOYMENT_NAME}}"<%={{ }}=%> \ No newline at end of file diff --git a/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl b/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl new file mode 100644 index 0000000..8a9170c --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl @@ -0,0 +1,6 @@ +{ + "Azure": { + "ProjectEndpoint": "{{{AzureAIProjectEndpoint}}}", + "ModelDeploymentName": "gpt-4.1-mini" + } +} diff --git a/samples/hosted-agent/dotnet/agent/run-requests.http b/samples/hosted-agent/dotnet/agent/run-requests.http new file mode 100644 index 0000000..4f2e87e --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/run-requests.http @@ -0,0 +1,52 @@ +@host = 
http://localhost:8088 +@endpoint = {{host}}/responses + +### Health Check +GET {{host}}/readiness + +### Simple hotel search - budget under $200 +POST {{endpoint}} +Content-Type: application/json + +{ + "input": "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under $200 per night", + "stream": false +} + +### Hotel search with higher budget +POST {{endpoint}} +Content-Type: application/json + +{ + "input": "Find me hotels in Seattle for March 20-23, 2025 under $250 per night", + "stream": false +} + +### Ask for recommendations without dates (agent should ask for clarification) +POST {{endpoint}} +Content-Type: application/json + +{ + "input": "What hotels do you recommend in Seattle?", + "stream": false +} + +### Explicit input format +POST {{endpoint}} +Content-Type: application/json + +{ + "input": [ + { + "type": "message", + "role": "user", + "content": [ + { + "type": "input_text", + "text": "I'm looking for a hotel in Seattle from 2025-04-01 to 2025-04-05, my budget is $150 per night maximum" + } + ] + } + ], + "stream": false +} diff --git a/samples/hosted-agent/dotnet/agent/{{SafeProjectName}}.csproj b/samples/hosted-agent/dotnet/agent/{{SafeProjectName}}.csproj new file mode 100644 index 0000000..d714c09 --- /dev/null +++ b/samples/hosted-agent/dotnet/agent/{{SafeProjectName}}.csproj @@ -0,0 +1,16 @@ + + + Exe + net10.0 + enable + enable + true + + + + + + + + + diff --git a/samples/hosted-agent/dotnet/minimal/.dockerignore b/samples/hosted-agent/dotnet/minimal/.dockerignore new file mode 100644 index 0000000..79cc807 --- /dev/null +++ b/samples/hosted-agent/dotnet/minimal/.dockerignore @@ -0,0 +1,51 @@ +# Build artifacts +bin/ +obj/ + +# IDE and editor files +.vs/ +.vscode/ +*.user +*.suo +.foundry/ + +# Source control +.git/ + +# Documentation +README.md + +# Ignore files +.gitignore +.dockerignore + +# Logs +*.log + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db + +# Package manager directories +node_modules/ 
+packages/ + +# Test results +TestResults/ +*.trx + +# Coverage reports +coverage/ +*.coverage +*.coveragexml + +# Local development config +appsettings.Development.json +.env + +.venv/ +__pycache__/ diff --git a/samples/hosted-agent/dotnet/minimal/Dockerfile b/samples/hosted-agent/dotnet/minimal/Dockerfile new file mode 100644 index 0000000..e4091cf --- /dev/null +++ b/samples/hosted-agent/dotnet/minimal/Dockerfile @@ -0,0 +1,21 @@ +# Build the application +FROM mcr.microsoft.com/dotnet/sdk:10.0-alpine AS build +WORKDIR /src + +# Copy files from the current directory on the host to the working directory in the container +COPY . . + +# Restore packages +RUN dotnet restore +RUN dotnet build -c Release --no-restore +RUN dotnet publish -c Release --no-build -o /app -p:AssemblyName=app + +# Run the application +FROM mcr.microsoft.com/dotnet/aspnet:10.0-alpine AS final +WORKDIR /app + +# Copy everything needed to run the app from the "build" stage. +COPY --from=build /app . + +EXPOSE 8088 +ENTRYPOINT ["dotnet", "app.dll"] diff --git a/samples/hosted-agent/dotnet/workflow/.dockerignore b/samples/hosted-agent/dotnet/workflow/.dockerignore new file mode 100644 index 0000000..6bfa65a --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/.dockerignore @@ -0,0 +1,57 @@ +# Build outputs +bin/ +obj/ +out/ + +# IDE and editor files +.vs/ +.vscode/ +*.user +*.suo +*.sln.docstates +.foundry/ + +# Git +.git/ +.gitignore + +# Documentation and samples (not needed in container) +*.md +*.http + +# Ignore files +.dockerignore + +# Logs +*.log + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db + +# Package manager directories +node_modules/ +packages/ + +# Test results +TestResults/ +*.trx + +# Coverage reports +coverage/ +*.coverage +*.coveragexml + +# Environment files with secrets +.env +.env.* +*.local +appsettings.*.json +!appsettings.json + +.venv/ +__pycache__/ diff --git a/samples/hosted-agent/dotnet/workflow/.env.tpl 
b/samples/hosted-agent/dotnet/workflow/.env.tpl new file mode 100644 index 0000000..2ce907d --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/.env.tpl @@ -0,0 +1,4 @@ +# IMPORTANT: Never commit .env to version control - add it to .gitignore + +PROJECT_ENDPOINT={{{AzureAIProjectEndpoint}}} +MODEL_DEPLOYMENT_NAME={{ModelDeploymentName}} \ No newline at end of file diff --git a/samples/hosted-agent/dotnet/workflow/Dockerfile.tpl b/samples/hosted-agent/dotnet/workflow/Dockerfile.tpl new file mode 100644 index 0000000..31bbacb --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/Dockerfile.tpl @@ -0,0 +1,22 @@ +# Build the application +FROM mcr.microsoft.com/dotnet/sdk:9.0 AS build +WORKDIR /src + +# Copy project files for dependency resolution +COPY *.csproj* . +RUN dotnet restore {{SafeProjectName}}.csproj + +# Copy files from the current directory on the host to the working directory in the container +COPY . . + +RUN dotnet publish -c Release -o /app -p:AssemblyName=app + +# Run the application +FROM mcr.microsoft.com/dotnet/aspnet:9.0 +WORKDIR /app + +# Copy everything needed to run the app from the "build" stage. +COPY --from=build /app . + +EXPOSE 8088 +ENTRYPOINT ["dotnet", "app.dll"] diff --git a/samples/hosted-agent/dotnet/workflow/Program.cs.tpl b/samples/hosted-agent/dotnet/workflow/Program.cs.tpl new file mode 100644 index 0000000..318f15e --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/Program.cs.tpl @@ -0,0 +1,166 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.AI.Agents.Persistent; +using Azure.AI.AgentServer.AgentFramework.Extensions; +using Azure.Core; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Agents.AI.Workflows; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.Configuration; +using OpenTelemetry; +using OpenTelemetry.Resources; +using OpenTelemetry.Trace; + +namespace {{SafeProjectName}}; + +internal static class Program +{ + private static TracerProvider? 
s_tracerProvider; + + private static async Task Main(string[] args) + { + try + { + // Enable OpenTelemetry tracing for visualization + ConfigureObservability(); + + await RunAsync().ConfigureAwait(false); + } + catch (Exception e) + { + Console.WriteLine($"Critical error: {e}"); + } + } + + private static async ValueTask RunAsync() + { + // Build configuration + var configuration = new ConfigurationBuilder() + .SetBasePath(Directory.GetCurrentDirectory()) + .AddJsonFile("appsettings.Development.json", optional: true) + .AddEnvironmentVariables() + .Build(); + + var endpoint = + configuration["PROJECT_ENDPOINT"] + ?? throw new InvalidOperationException( + "PROJECT_ENDPOINT is required. Set it in appsettings.Development.json for local development or as PROJECT_ENDPOINT environment variable for production"); + var deployment = + configuration["MODEL_DEPLOYMENT_NAME"] + ?? throw new InvalidOperationException( + "MODEL_DEPLOYMENT_NAME is required. Set it in appsettings.Development.json for local development or as MODEL_DEPLOYMENT_NAME environment variable for containers"); + + Console.WriteLine($"Using Azure AI endpoint: {endpoint}"); + Console.WriteLine($"Using model deployment: {deployment}"); + + // Create credential - use ManagedIdentityCredential if MSI_ENDPOINT exists, otherwise DefaultAzureCredential + TokenCredential credential = string.IsNullOrEmpty(Environment.GetEnvironmentVariable("MSI_ENDPOINT")) + ? new DefaultAzureCredential() + : new ManagedIdentityCredential(); + + // Create separate PersistentAgentsClient for each agent + var writerClient = new PersistentAgentsClient(endpoint, credential); + var reviewerClient = new PersistentAgentsClient(endpoint, credential); + + (ChatClientAgent agent, string id)? writer = null; + (ChatClientAgent agent, string id)? reviewer = null; + + try + { + // Create Foundry agents with separate clients + writer = await CreateAgentAsync( + writerClient, + deployment, + "Writer", + "You are an excellent content writer. 
You create new content and edit contents based on the feedback." + ); + reviewer = await CreateAgentAsync( + reviewerClient, + deployment, + "Reviewer", + "You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content. Provide the feedback in the most concise manner possible." + ); + Console.WriteLine(); + + var workflow = new WorkflowBuilder(writer.Value.agent) + .AddEdge(writer.Value.agent, reviewer.Value.agent) + .WithOutputFrom(reviewer.Value.agent) + .Build(); + + Console.WriteLine("Starting Writer-Reviewer Workflow Agent Server on http://localhost:8088"); + await workflow.AsAgent().RunAIAgentAsync(); + } + catch (Exception ex) + { + Console.WriteLine($"Error running workflow: {ex.Message}"); + throw; + } + finally + { + // Clean up all resources + await CleanupAsync(writerClient, writer?.id); + await CleanupAsync(reviewerClient, reviewer?.id); + + if (credential is IDisposable disposable) + { + disposable.Dispose(); + } + } + } + + private static async Task<(ChatClientAgent agent, string id)> CreateAgentAsync( + PersistentAgentsClient client, + string model, + string name, + string instructions) + { + var agentMetadata = await client.Administration.CreateAgentAsync( + model: model, + name: name, + instructions: instructions + ); + + var chatClient = client.AsIChatClient(agentMetadata.Value.Id); + return (new ChatClientAgent(chatClient), agentMetadata.Value.Id); + } + + private static async Task CleanupAsync(PersistentAgentsClient client, string? agentId) + { + if (string.IsNullOrEmpty(agentId)) + { + return; + } + + try + { + await client.Administration.DeleteAgentAsync(agentId); + } + catch (Exception e) + { + Console.WriteLine($"Cleanup failed for agent {agentId}: {e.Message}"); + } + } + + private static void ConfigureObservability() + { + var otlpEndpoint = + Environment.GetEnvironmentVariable("OTLP_ENDPOINT") ?? 
"http://localhost:4319"; + + var resourceBuilder = ResourceBuilder.CreateDefault() + .AddService("WorkflowSample"); + + s_tracerProvider = Sdk.CreateTracerProviderBuilder() + .SetResourceBuilder(resourceBuilder) + .AddSource("Microsoft.Agents.AI.*") // All agent framework sources + .SetSampler(new AlwaysOnSampler()) // Ensure all traces are sampled + .AddOtlpExporter(options => + { + options.Endpoint = new Uri(otlpEndpoint); + options.Protocol = OpenTelemetry.Exporter.OtlpExportProtocol.Grpc; + }) + .Build(); + + Console.WriteLine($"OpenTelemetry configured. OTLP endpoint: {otlpEndpoint}"); + } +} diff --git a/samples/hosted-agent/dotnet/workflow/README.md b/samples/hosted-agent/dotnet/workflow/README.md new file mode 100644 index 0000000..fde0fb2 --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/README.md @@ -0,0 +1,169 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. 
+ +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. + +# What this sample demonstrates + +This sample demonstrates a **key advantage of code-based hosted agents**: + +- **Multi-agent workflows** - Orchestrate multiple agents working together + +Code-based agents can execute **any C# code** you write. This sample includes a Writer-Reviewer workflow where two agents collaborate: a Writer creates content and a Reviewer provides feedback. + +The agent is hosted using the [Azure AI AgentServer SDK](https://www.nuget.org/packages/Azure.AI.AgentServer.AgentFramework/) and can be deployed to Microsoft Foundry. + +## How It Works + +### Multi-Agent Workflow + +In [Program.cs](Program.cs), the sample creates two agents using `PersistentAgentsClient`: + +- **Writer** - An agent that creates and edits content based on feedback +- **Reviewer** - An agent that provides actionable feedback on the content + +The `WorkflowBuilder` connects these agents in a sequential flow: + +1. The Writer receives the initial request and generates content +2. The Reviewer evaluates the content and provides feedback +3. Both agent responses are output to the user + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer SDK](https://www.nuget.org/packages/Azure.AI.AgentServer.AgentFramework/), +which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Azure AI Foundry Project** + - Project created. 
+ - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`) + - Note your project endpoint URL and model deployment name + > **Note**: You can right-click the project in the Microsoft Foundry VS Code extension and select `Copy Project Endpoint URL` to get the endpoint. + +2. **Azure CLI** + - Installed and authenticated + - Run `az login` and verify with `az account show` + +3. **.NET 10.0 SDK or later** + - Verify your version: `dotnet --version` + - Download from [https://dotnet.microsoft.com/download](https://dotnet.microsoft.com/download) + +### Environment Variables + +**Foundry VS Code Extension Users:** + +If you created your hosted agent project using the Microsoft Foundry VS Code extension, an `appsettings.Development.json` file is already created in the project root with the necessary environment variables. Double-check that the values are correct. + +**PowerShell:** + +```powershell +# Replace with your actual values +$env:PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>" +$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +**Bash:** + +```bash +export PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>" +export MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +### Running the Sample + +To run the agent, execute the following command in your terminal: + +```bash +dotnet restore +dotnet build +dotnet run +``` + +This will start the hosted agent locally on `http://localhost:8088/`. + +### Interacting with the Agent + +**VS Code:** + +1. Open the Visual Studio Code Command Palette and execute the `Microsoft Foundry: Open Container Agent Playground Locally` command. +2. Execute the following commands to start the containerized hosted agent. + ```bash + dotnet restore + dotnet build + dotnet run + ``` +3. Submit a request to the agent through the playground interface. For example, you may enter a prompt such as: "Create a slogan for a new electric SUV that is affordable and fun to drive." +4. Review the agent's response in the playground interface. 
+ +> **Note**: Open the local playground before starting the container agent to ensure the visualization functions correctly. + +**PowerShell (Windows):** + +```powershell +$body = @{ + input = "Create a slogan for a new electric SUV that is affordable and fun to drive" + stream = $false +} | ConvertTo-Json + +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" +``` + +**Bash/curl (Linux/macOS):** + +```bash +curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ + -d '{"input": "Create a slogan for a new electric SUV that is affordable and fun to drive","stream":false}' +``` + +You can also use the `run-requests.http` file in this directory with the VS Code REST Client extension. + +The Writer agent will generate content based on your prompt, and the Reviewer agent will provide feedback on the output. + +## Deploying the Agent to Microsoft Foundry + +**Preparation (required)** + +Please check the environment_variables section in [agent.yaml](agent.yaml) and ensure the variables there are set in your target Microsoft Foundry Project. + +To deploy the hosted agent: + +1. Open the VS Code Command Palette and run the `Microsoft Foundry: Deploy Hosted Agent` command. + +2. Follow the interactive deployment prompts. The extension will help you select or create the container files it needs. + +3. After deployment completes, the hosted agent appears under the `Hosted Agents (Preview)` section of the extension tree. You can select the agent there to view details and test it using the integrated playground. + +**What the deploy flow does for you:** + +- Creates or obtains an Azure Container Registry for the target project. +- Builds and pushes a container image from your workspace (the build packages the workspace respecting `.dockerignore`). +- Creates an agent version in Microsoft Foundry using the built image. 
If a `.env` file exists at the workspace root, the extension will parse it and include its key/value pairs as the hosted agent's environment variables in the create request (these variables will be available to the agent runtime). +- Starts the agent container on the project's capability host. If the capability host is not provisioned, the extension will prompt you to enable it and will guide you through creating it. + +## MSI Configuration in the Azure Portal + +This sample requires the Microsoft Foundry Project to authenticate using a Managed Identity when running remotely in Azure. Grant the project's managed identity the required permissions by assigning the built-in [Azure AI User](https://aka.ms/foundry-ext-project-role) role. + +To configure the Managed Identity: + +1. In the Azure Portal, open the Foundry Project. +2. Select "Access control (IAM)" from the left-hand menu. +3. Click "Add" and choose "Add role assignment". +4. In the role selection, search for and select "Azure AI User", then click "Next". +5. For "Assign access to", choose "Managed identity". +6. Click "Select members", locate the managed identity associated with your Foundry Project (you can search by the project name), then click "Select". +7. Click "Review + assign" to complete the assignment. +8. Allow a few minutes for the role assignment to propagate before running the application. 
+ +## Additional Resources + +- [Microsoft Agents Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview) +- [Managed Identities for Azure Resources](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/) diff --git a/samples/hosted-agent/dotnet/workflow/agent.yaml.tpl b/samples/hosted-agent/dotnet/workflow/agent.yaml.tpl new file mode 100644 index 0000000..054f56e --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/agent.yaml.tpl @@ -0,0 +1,24 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: {{AgentName}} +description: > + A multi-agent workflow featuring a Writer and Reviewer that collaborate + to create and refine content. +metadata: + authors: + - Microsoft + tags: + - Azure AI AgentServer + - Microsoft Agent Framework + - Multi-Agent Workflow + - Writer-Reviewer + - Content Creation +protocols: + - protocol: responses + version: v1 +environment_variables: + - name: PROJECT_ENDPOINT + value: {{=<% %>=}}"{{PROJECT_ENDPOINT}}"<%={{ }}=%> + - name: MODEL_DEPLOYMENT_NAME + value: {{=<% %>=}}"{{MODEL_DEPLOYMENT_NAME}}"<%={{ }}=%> diff --git a/samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl b/samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl new file mode 100644 index 0000000..3545752 --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl @@ -0,0 +1,4 @@ +{ + "PROJECT_ENDPOINT": "{{{AzureAIProjectEndpoint}}}", + "MODEL_DEPLOYMENT_NAME": "{{ModelDeploymentName}}" +} diff --git a/samples/hosted-agent/dotnet/workflow/{{SafeProjectName}}.csproj b/samples/hosted-agent/dotnet/workflow/{{SafeProjectName}}.csproj new file mode 100644 index 0000000..d884ae2 --- /dev/null +++ b/samples/hosted-agent/dotnet/workflow/{{SafeProjectName}}.csproj @@ -0,0 +1,30 @@ + + + Exe + net9.0 + enable + enable + + + + + + + + + + 
+ + + + + + + + PreserveNewest + + + diff --git a/samples/hosted-agent/python/agent/.dockerignore b/samples/hosted-agent/python/agent/.dockerignore new file mode 100644 index 0000000..779bc67 --- /dev/null +++ b/samples/hosted-agent/python/agent/.dockerignore @@ -0,0 +1,66 @@ +# Virtual environments +.venv/ +venv/ +env/ +.python-version + +# Environment files with secrets +.env +.env.* +*.local + +# Python build artifacts +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Testing +.tox/ +.nox/ +.coverage +.coverage.* +htmlcov/ +.pytest_cache/ +.mypy_cache/ + +# IDE and OS files +.DS_Store +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Foundry config +.foundry/ +build-source-*/ + +# Git +.git/ +.gitignore + +# Docker +.dockerignore + +# Documentation +docs/ +*.md +!README.md +LICENSE diff --git a/samples/hosted-agent/python/agent/.env.tpl b/samples/hosted-agent/python/agent/.env.tpl new file mode 100644 index 0000000..2ce907d --- /dev/null +++ b/samples/hosted-agent/python/agent/.env.tpl @@ -0,0 +1,4 @@ +# IMPORTANT: Never commit .env to version control - add it to .gitignore + +PROJECT_ENDPOINT={{{AzureAIProjectEndpoint}}} +MODEL_DEPLOYMENT_NAME={{ModelDeploymentName}} \ No newline at end of file diff --git a/samples/hosted-agent/python/agent/.vscode/launch.json b/samples/hosted-agent/python/agent/.vscode/launch.json new file mode 100644 index 0000000..fe92f4a --- /dev/null +++ b/samples/hosted-agent/python/agent/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Debug Local Workflow HTTP Server", + "type": "debugpy", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5679 + }, + "preLaunchTask": "Open Agent Inspector", + "internalConsoleOptions": "neverOpen", + "postDebugTask": "Terminate All Tasks" + } + ] +} diff --git 
a/samples/hosted-agent/python/agent/.vscode/tasks.json b/samples/hosted-agent/python/agent/.vscode/tasks.json new file mode 100644 index 0000000..4dbd4ea --- /dev/null +++ b/samples/hosted-agent/python/agent/.vscode/tasks.json @@ -0,0 +1,70 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Validate prerequisites", + "type": "aitk", + "command": "debug-check-prerequisites", + "args": { + "portOccupancy": [5679, 8088] + } + }, + { + "label": "Run Agent/Workflow HTTP Server", + "type": "shell", + "command": "${command:python.interpreterPath} -m debugpy --listen 127.0.0.1:5679 -m agentdev run main.py --verbose --port 8088", + "isBackground": true, + "options": { + "cwd": "${workspaceFolder}" + }, + "dependsOn": ["Validate prerequisites"], + "problemMatcher": { + "pattern": [ + { + "regexp": "^.*$", + "file": 0, + "location": 1, + "message": 2 + } + ], + "background": { + "activeOnStart": true, + "beginsPattern": ".*", + "endsPattern": "Application startup complete|running on|Started server process" + } + } + }, + { + "label": "Open Agent Inspector", + "type": "shell", + "command": "echo '${input:openAgentInspector}'", + "presentation": { + "reveal": "never" + }, + "dependsOn": ["Run Agent/Workflow HTTP Server"] + }, + { + "label": "Terminate All Tasks", + "command": "echo ${input:terminate}", + "type": "shell", + "problemMatcher": [] + } + ], + "inputs": [ + { + "id": "openAgentInspector", + "type": "command", + "command": "ai-mlstudio.openTestTool", + "args": { + "triggeredFrom": "tasks", + "port": 8088 + } + }, + { + "id": "terminate", + "type": "command", + "command": "workbench.action.tasks.terminate", + "args": "terminateAll" + } + ] +} diff --git a/samples/hosted-agent/python/agent/Dockerfile b/samples/hosted-agent/python/agent/Dockerfile new file mode 100644 index 0000000..413c6ac --- /dev/null +++ b/samples/hosted-agent/python/agent/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY ./ . 
+ +RUN if [ -f requirements.txt ]; then \ + pip install --no-cache-dir -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] diff --git a/samples/hosted-agent/python/agent/README.md b/samples/hosted-agent/python/agent/README.md new file mode 100644 index 0000000..fc6e876 --- /dev/null +++ b/samples/hosted-agent/python/agent/README.md @@ -0,0 +1,195 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. 
+ +# What this sample demonstrates + +This sample demonstrates a **key advantage of code-based hosted agents**: + +- **Local Python tool execution** - Run custom Python functions as agent tools + +Code-based agents can execute **any Python code** you write. This sample includes a Seattle Hotel Agent with a `get_available_hotels` tool that searches for available hotels based on check-in/check-out dates and budget preferences. + +The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and can be deployed to Microsoft Foundry. + +## How It Works + +### Local Tools Integration + +In [main.py](main.py), the agent uses a local Python function (`get_available_hotels`) that simulates a hotel availability API. This demonstrates how code-based agents can execute custom server-side logic that prompt agents cannot access. + +The tool accepts: + +- **check_in_date** - Check-in date in YYYY-MM-DD format +- **check_out_date** - Check-out date in YYYY-MM-DD format +- **max_price** - Maximum price per night in USD (optional, defaults to $500) + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), +which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Microsoft Foundry Project** + - A Microsoft Project created. + - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`). + - Note your project endpoint URL and model deployment name. + +2. **Azure CLI** + - Installed and authenticated + - Run `az login` and verify with `az account show` + +3. 
**Python 3.10 or higher** + - Verify your version: `python --version` + - If you have Python 3.9 or older, install a newer version: + - Windows: `winget install Python.Python.3.12` + - macOS: `brew install python@3.12` + - Linux: Use your package manager + +### Environment Variables + +Set the following environment variables: + +- `PROJECT_ENDPOINT` - Your Microsoft Foundry project endpoint URL (required) +- `MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (defaults to `gpt-4.1-mini`) + +This sample loads environment variables from a local `.env` file if present. + +Create a `.env` file in this directory with the following content: + +``` +PROJECT_ENDPOINT=https://<your-resource>.services.ai.azure.com/api/projects/<your-project> +MODEL_DEPLOYMENT_NAME=gpt-4.1-mini +``` + +Or set them via PowerShell: + +```powershell +# Replace with your actual values +$env:PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>" +$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +### Setting Up a Virtual Environment + +It's recommended to use a virtual environment to isolate project dependencies: + +**macOS/Linux:** + +```bash +python -m venv .venv +source .venv/bin/activate +``` + +**Windows (PowerShell):** + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +``` + +### Installing Dependencies + +Install the required Python dependencies using pip: + +```bash +pip install -r requirements.txt +``` + +The required packages are: + +- `azure-ai-agentserver-agentframework` - Agent Framework and AgentServer SDK +- `debugpy` - Python debug server used by the VS Code debug configuration +- `agent-dev-cli` - Agent development CLI tool (preview) + +### Running the Sample + +#### Option 1: Press F5 (Recommended) + +Press **F5** in VS Code to start debugging. Alternatively, you can use the VS Code debug menu: + +1. Open the **Run and Debug** view (Ctrl+Shift+D / Cmd+Shift+D) +2. Select **"Debug Local Workflow HTTP Server"** from the dropdown +3. Click the green **Start Debugging** button (or press F5) + +This will: + +1. Start the HTTP server with debugging enabled +2. 
Open the AI Toolkit Agent Inspector for interactive testing +3. Allow you to set breakpoints and inspect the workflow + +#### Option 2: Run in Terminal + +Run as HTTP server (default): + +```bash +python main.py +``` + +This will start the hosted agent locally on `http://localhost:8088/`. + +**PowerShell (Windows):** + +```powershell +$body = @{ + input = "I need a hotel in Seattle from 2025-03-15 to 2025-03-18, budget under `$200 per night" + stream = $false +} | ConvertTo-Json + +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" +``` + +**Bash/curl (Linux/macOS):** + +```bash +curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ + -d '{"input": "Find me hotels in Seattle for March 20-23, 2025 under $200 per night","stream":false}' +``` + +The agent will use the `get_available_hotels` tool to search for available hotels matching your criteria. + +## Deploying the Agent to Microsoft Foundry + +To deploy the hosted agent: + +1. Open the VS Code Command Palette and run the `Microsoft Foundry: Deploy Hosted Agent` command. + +2. Follow the interactive deployment prompts. The extension will help you select or create the container files it needs: + - It first looks for a Dockerfile at the repository root. If not found, you can select an existing Dockerfile or generate a new one. + - If you choose to generate a Dockerfile, the extension will place the files at the repo root and open the Dockerfile in the editor; the deployment flow is intentionally cancelled in that case so you can review and edit the generated files before re-running the deploy command. + +3. After deployment completes, the hosted agent appears under the `Hosted Agents (Preview)` section of the extension tree. You can select the agent there to view details and test it using the integrated playground. 
+ +**What the deploy flow does for you:** + +- Creates or obtains an Azure Container Registry for the target project. +- Builds and pushes a container image from your workspace (the build packages the workspace respecting `.dockerignore`). +- Creates an agent version in Microsoft Foundry using the built image. If a `.env` file exists at the workspace root, the extension will parse it and include its key/value pairs as the hosted agent's environment variables in the create request (these variables will be available to the agent runtime). +- Starts the agent container on the project's capability host. If the capability host is not provisioned, the extension will prompt you to enable it and will guide you through creating it. + +### MSI Configuration in the Azure Portal + +This sample requires the Microsoft Foundry Project to authenticate using a Managed Identity when running remotely in Azure. Grant the project's managed identity the required permissions by assigning the built-in [Azure AI User](https://aka.ms/foundry-ext-project-role) role. + +To configure the Managed Identity: + +1. In the Azure Portal, open the Foundry Project. +2. Select "Access control (IAM)" from the left-hand menu. +3. Click "Add" and choose "Add role assignment". +4. In the role selection, search for and select "Azure AI User", then click "Next". +5. For "Assign access to", choose "Managed identity". +6. Click "Select members", locate the managed identity associated with your Foundry Project (you can search by the project name), then click "Select". +7. Click "Review + assign" to complete the assignment. +8. Allow a few minutes for the role assignment to propagate before running the application. 
 + +## Additional Resources + +- [Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview) +- [Managed Identities for Azure Resources](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/) diff --git a/samples/hosted-agent/python/agent/agent.yaml.tpl b/samples/hosted-agent/python/agent/agent.yaml.tpl new file mode 100644 index 0000000..89e57e5 --- /dev/null +++ b/samples/hosted-agent/python/agent/agent.yaml.tpl @@ -0,0 +1,25 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: {{AgentName}} +description: > + A travel assistant agent that helps users find hotels in Seattle. + Demonstrates local Python tool execution - a key advantage of code-based + hosted agents over prompt agents. +metadata: + authors: + - Microsoft + tags: + - Azure AI AgentServer + - Microsoft Agent Framework + - Local Tools + - Travel Assistant + - Hotel Search +protocols: + - protocol: responses + version: v1 +environment_variables: + - name: PROJECT_ENDPOINT + value: {{=<% %>=}}"{{PROJECT_ENDPOINT}}"<%={{ }}=%> + - name: MODEL_DEPLOYMENT_NAME + value: {{=<% %>=}}"{{MODEL_DEPLOYMENT_NAME}}"<%={{ }}=%> \ No newline at end of file diff --git a/samples/hosted-agent/python/agent/main.py b/samples/hosted-agent/python/agent/main.py new file mode 100644 index 0000000..19acbdd --- /dev/null +++ b/samples/hosted-agent/python/agent/main.py @@ -0,0 +1,149 @@ +""" +Seattle Hotel Agent - A simple agent with a tool to find hotels in Seattle. +Uses Microsoft Agent Framework with Azure AI Foundry. +Ready for deployment to Foundry Hosted Agent service. 
+""" + +import asyncio +import os +from datetime import datetime +from typing import Annotated + +from dotenv import load_dotenv + +load_dotenv(override=True) + +from agent_framework.azure import AzureAIAgentClient +from azure.ai.agentserver.agentframework import from_agent_framework +from azure.identity.aio import DefaultAzureCredential + +# Configure these for your Foundry project +# Read the explicit variables present in the .env file +PROJECT_ENDPOINT = os.getenv( + "PROJECT_ENDPOINT" +) # e.g., "https://.services.ai.azure.com" +MODEL_DEPLOYMENT_NAME = os.getenv( + "MODEL_DEPLOYMENT_NAME", "gpt-4.1-mini" +) # Your model deployment name e.g., "gpt-4.1-mini" + + +# Simulated hotel data for Seattle +SEATTLE_HOTELS = [ + { + "name": "Contoso Suites", + "price_per_night": 189, + "rating": 4.5, + "location": "Downtown", + }, + { + "name": "Fabrikam Residences", + "price_per_night": 159, + "rating": 4.2, + "location": "Pike Place Market", + }, + { + "name": "Alpine Ski House", + "price_per_night": 249, + "rating": 4.7, + "location": "Seattle Center", + }, + { + "name": "Margie's Travel Lodge", + "price_per_night": 219, + "rating": 4.4, + "location": "Waterfront", + }, + { + "name": "Northwind Inn", + "price_per_night": 139, + "rating": 4.0, + "location": "Capitol Hill", + }, + { + "name": "Relecloud Hotel", + "price_per_night": 99, + "rating": 3.8, + "location": "University District", + }, +] + + +def get_available_hotels( + check_in_date: Annotated[str, "Check-in date in YYYY-MM-DD format"], + check_out_date: Annotated[str, "Check-out date in YYYY-MM-DD format"], + max_price: Annotated[int, "Maximum price per night in USD (optional)"] = 500, +) -> str: + """ + Get available hotels in Seattle for the specified dates. + This simulates a call to a fake hotel availability API. 
+ """ + try: + # Parse dates + check_in = datetime.strptime(check_in_date, "%Y-%m-%d") + check_out = datetime.strptime(check_out_date, "%Y-%m-%d") + + # Validate dates + if check_out <= check_in: + return "Error: Check-out date must be after check-in date." + + nights = (check_out - check_in).days + + # Filter hotels by price + available_hotels = [ + hotel for hotel in SEATTLE_HOTELS if hotel["price_per_night"] <= max_price + ] + + if not available_hotels: + return ( + f"No hotels found in Seattle within your budget of ${max_price}/night." + ) + + # Build response + result = f"Available hotels in Seattle from {check_in_date} to {check_out_date} ({nights} nights):\n\n" + + for hotel in available_hotels: + total_cost = hotel["price_per_night"] * nights + result += f"**{hotel['name']}**\n" + result += f" Location: {hotel['location']}\n" + result += f" Rating: {hotel['rating']}/5\n" + result += f" ${hotel['price_per_night']}/night (Total: ${total_cost})\n\n" + + return result + + except ValueError as e: + return f"Error parsing dates. Please use YYYY-MM-DD format. Details: {str(e)}" + + +async def main(): + """Main function to run the agent as a web server.""" + async with ( + DefaultAzureCredential() as credential, + AzureAIAgentClient( + project_endpoint=PROJECT_ENDPOINT, + model_deployment_name=MODEL_DEPLOYMENT_NAME, + credential=credential, + ) as client, + ): + agent = client.create_agent( + name="SeattleHotelAgent", + instructions="""You are a helpful travel assistant specializing in finding hotels in Seattle, Washington. + +When a user asks about hotels in Seattle: +1. Ask for their check-in and check-out dates if not provided +2. Ask about their budget preferences if not mentioned +3. Use the get_available_hotels tool to find available options +4. Present the results in a friendly, informative way +5. Offer to help with additional questions about the hotels or Seattle + +Be conversational and helpful. 
If users ask about things outside of Seattle hotels, +politely let them know you specialize in Seattle hotel recommendations.""", + tools=[get_available_hotels], + ) + + print("Seattle Hotel Agent Server running on http://localhost:8088") + server = from_agent_framework(agent) + await server.run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/samples/hosted-agent/python/agent/requirements.txt b/samples/hosted-agent/python/agent/requirements.txt new file mode 100644 index 0000000..e2e7cd2 --- /dev/null +++ b/samples/hosted-agent/python/agent/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-agentframework==1.0.0b14 +debugpy +# Agent development CLI tool (preview) +agent-dev-cli \ No newline at end of file diff --git a/samples/hosted-agent/python/minimal/.dockerignore b/samples/hosted-agent/python/minimal/.dockerignore new file mode 100644 index 0000000..79cc807 --- /dev/null +++ b/samples/hosted-agent/python/minimal/.dockerignore @@ -0,0 +1,51 @@ +# Build artifacts +bin/ +obj/ + +# IDE and editor files +.vs/ +.vscode/ +*.user +*.suo +.foundry/ + +# Source control +.git/ + +# Documentation +README.md + +# Ignore files +.gitignore +.dockerignore + +# Logs +*.log + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db + +# Package manager directories +node_modules/ +packages/ + +# Test results +TestResults/ +*.trx + +# Coverage reports +coverage/ +*.coverage +*.coveragexml + +# Local development config +appsettings.Development.json +.env + +.venv/ +__pycache__/ diff --git a/samples/hosted-agent/python/minimal/Dockerfile b/samples/hosted-agent/python/minimal/Dockerfile new file mode 100644 index 0000000..413c6ac --- /dev/null +++ b/samples/hosted-agent/python/minimal/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY ./ . 
+ +RUN if [ -f requirements.txt ]; then \ + pip install --no-cache-dir -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] diff --git a/samples/hosted-agent/python/workflow/.dockerignore b/samples/hosted-agent/python/workflow/.dockerignore new file mode 100644 index 0000000..79cc807 --- /dev/null +++ b/samples/hosted-agent/python/workflow/.dockerignore @@ -0,0 +1,51 @@ +# Build artifacts +bin/ +obj/ + +# IDE and editor files +.vs/ +.vscode/ +*.user +*.suo +.foundry/ + +# Source control +.git/ + +# Documentation +README.md + +# Ignore files +.gitignore +.dockerignore + +# Logs +*.log + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db + +# Package manager directories +node_modules/ +packages/ + +# Test results +TestResults/ +*.trx + +# Coverage reports +coverage/ +*.coverage +*.coveragexml + +# Local development config +appsettings.Development.json +.env + +.venv/ +__pycache__/ diff --git a/samples/hosted-agent/python/workflow/.env.tpl b/samples/hosted-agent/python/workflow/.env.tpl new file mode 100644 index 0000000..2ce907d --- /dev/null +++ b/samples/hosted-agent/python/workflow/.env.tpl @@ -0,0 +1,4 @@ +# IMPORTANT: Never commit .env to version control - add it to .gitignore + +PROJECT_ENDPOINT={{{AzureAIProjectEndpoint}}} +MODEL_DEPLOYMENT_NAME={{ModelDeploymentName}} \ No newline at end of file diff --git a/samples/hosted-agent/python/workflow/.vscode/launch.json b/samples/hosted-agent/python/workflow/.vscode/launch.json new file mode 100644 index 0000000..fe92f4a --- /dev/null +++ b/samples/hosted-agent/python/workflow/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Debug Local Workflow HTTP Server", + "type": "debugpy", + "request": "attach", + "connect": { + "host": "localhost", + "port": 5679 + }, + "preLaunchTask": "Open Agent Inspector", + "internalConsoleOptions": "neverOpen", + "postDebugTask": "Terminate All Tasks" 
+ } + ] +} diff --git a/samples/hosted-agent/python/workflow/.vscode/tasks.json b/samples/hosted-agent/python/workflow/.vscode/tasks.json new file mode 100644 index 0000000..4dbd4ea --- /dev/null +++ b/samples/hosted-agent/python/workflow/.vscode/tasks.json @@ -0,0 +1,70 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "Validate prerequisites", + "type": "aitk", + "command": "debug-check-prerequisites", + "args": { + "portOccupancy": [5679, 8088] + } + }, + { + "label": "Run Agent/Workflow HTTP Server", + "type": "shell", + "command": "${command:python.interpreterPath} -m debugpy --listen 127.0.0.1:5679 -m agentdev run main.py --verbose --port 8088", + "isBackground": true, + "options": { + "cwd": "${workspaceFolder}" + }, + "dependsOn": ["Validate prerequisites"], + "problemMatcher": { + "pattern": [ + { + "regexp": "^.*$", + "file": 0, + "location": 1, + "message": 2 + } + ], + "background": { + "activeOnStart": true, + "beginsPattern": ".*", + "endsPattern": "Application startup complete|running on|Started server process" + } + } + }, + { + "label": "Open Agent Inspector", + "type": "shell", + "command": "echo '${input:openAgentInspector}'", + "presentation": { + "reveal": "never" + }, + "dependsOn": ["Run Agent/Workflow HTTP Server"] + }, + { + "label": "Terminate All Tasks", + "command": "echo ${input:terminate}", + "type": "shell", + "problemMatcher": [] + } + ], + "inputs": [ + { + "id": "openAgentInspector", + "type": "command", + "command": "ai-mlstudio.openTestTool", + "args": { + "triggeredFrom": "tasks", + "port": 8088 + } + }, + { + "id": "terminate", + "type": "command", + "command": "workbench.action.tasks.terminate", + "args": "terminateAll" + } + ] +} diff --git a/samples/hosted-agent/python/workflow/Dockerfile b/samples/hosted-agent/python/workflow/Dockerfile new file mode 100644 index 0000000..413c6ac --- /dev/null +++ b/samples/hosted-agent/python/workflow/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY ./ . 
+ +RUN if [ -f requirements.txt ]; then \ + pip install --no-cache-dir -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] diff --git a/samples/hosted-agent/python/workflow/README.md b/samples/hosted-agent/python/workflow/README.md new file mode 100644 index 0000000..3fd239e --- /dev/null +++ b/samples/hosted-agent/python/workflow/README.md @@ -0,0 +1,204 @@ +**IMPORTANT!** All samples and other resources made available in this GitHub repository ("samples") are designed to assist in accelerating development of agents, solutions, and agent workflows for various scenarios. Review all provided resources and carefully test output behavior in the context of your use case. AI responses may be inaccurate and AI actions should be monitored with human oversight. Learn more in the transparency documents for [Agent Service](https://learn.microsoft.com/en-us/azure/ai-foundry/responsible-ai/agents/transparency-note) and [Agent Framework](https://github.com/microsoft/agent-framework/blob/main/TRANSPARENCY_FAQ.md). + +Agents, solutions, or other output you create may be subject to legal and regulatory requirements, may require licenses, or may not be suitable for all industries, scenarios, or use cases. By using any sample, you are acknowledging that any output created using those samples are solely your responsibility, and that you will comply with all applicable laws, regulations, and relevant safety standards, terms of service, and codes of conduct. + +Third-party samples contained in this folder are subject to their own designated terms, and they have not been tested or verified by Microsoft or its affiliates. + +Microsoft has no responsibility to you or others with respect to any of these samples or any resulting output. 
 + +# What this sample demonstrates + +This sample demonstrates a **key advantage of code-based hosted agents**: + +- **Multi-agent workflows** - Orchestrate multiple agents working together + +Code-based agents can execute **any Python code** you write. This sample includes a **Writer-Reviewer workflow** where two agents collaborate: a Writer creates content and a Reviewer provides feedback. + +The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/) and can be deployed to Microsoft Foundry. + +## How It Works + +### Multi-Agent Workflow + +In [main.py](main.py), the sample creates two agents: + +- **Writer** - An agent that creates and edits content based on feedback +- **Reviewer** - An agent that provides actionable feedback on the content + +The `WorkflowBuilder` connects these agents in a sequential flow: + +1. The Writer receives the initial request and generates content +2. The Reviewer evaluates the content and provides feedback +3. Both agent responses are output to the user. + +### Agent Hosting + +The agent is hosted using the [Azure AI AgentServer SDK](https://pypi.org/project/azure-ai-agentserver-agentframework/), +which provisions a REST API endpoint compatible with the OpenAI Responses protocol. + +## Running the Agent Locally + +### Prerequisites + +Before running this sample, ensure you have: + +1. **Microsoft Foundry Project** + - A Microsoft Foundry Project created. + - Chat model deployed (e.g., `gpt-4o` or `gpt-4.1`). + - Note your project endpoint URL and model deployment name. + +2. **Azure CLI** + - Installed and authenticated + - Run `az login` and verify with `az account show` + +3. 
**Python 3.10 or higher** + - Verify your version: `python --version` + - If you have Python 3.9 or older, install a newer version: + - Windows: `winget install Python.Python.3.12` + - macOS: `brew install python@3.12` + - Linux: Use your package manager + +### Environment Variables + +Set the following environment variables: + +- `PROJECT_ENDPOINT` - Your Microsoft Foundry project endpoint URL (required) +- `MODEL_DEPLOYMENT_NAME` - The deployment name for your chat model (defaults to `gpt-4.1-mini`) + +This sample loads environment variables from a local `.env` file if present. + +Create a `.env` file in this directory with the following content: + +``` +PROJECT_ENDPOINT=https://<your-resource>.services.ai.azure.com/api/projects/<your-project> +MODEL_DEPLOYMENT_NAME=gpt-4.1-mini +``` + +Or set them via PowerShell: + +```powershell +# Replace with your actual values +$env:PROJECT_ENDPOINT="https://<your-resource>.services.ai.azure.com/api/projects/<your-project>" +$env:MODEL_DEPLOYMENT_NAME="gpt-4.1-mini" +``` + +### Setting Up a Virtual Environment + +It's recommended to use a virtual environment to isolate project dependencies: + +**macOS/Linux:** + +```bash +python -m venv .venv +source .venv/bin/activate +``` + +**Windows (PowerShell):** + +```powershell +python -m venv .venv +.\.venv\Scripts\Activate.ps1 +``` + +### Installing Dependencies + +Install the required Python dependencies using pip: + +```bash +pip install -r requirements.txt +``` + +The required packages are: + +- `azure-ai-agentserver-agentframework` - Agent Framework and AgentServer SDK +- `debugpy` - Python debug server used by the VS Code debug configuration +- `agent-dev-cli` - Agent development CLI tool (preview) + +### Running the Sample + +### Option 1: Press F5 (Recommended) + +Press **F5** in VS Code to start debugging. Alternatively, you can use the VS Code debug menu: + +1. Open the **Run and Debug** view (Ctrl+Shift+D / Cmd+Shift+D) +2. Select **"Debug Local Workflow HTTP Server"** from the dropdown +3. Click the green **Start Debugging** button (or press F5) + +This will: + +1. Start the HTTP server with debugging enabled +2. 
Open the AI Toolkit Agent Inspector for interactive testing +3. Allow you to set breakpoints and inspect the workflow + +### Option 2: Run in Terminal + +Run as HTTP server (default): + +```bash +python main.py +``` + +This will start the hosted agent locally on `http://localhost:8088/`. + +**PowerShell (Windows):** + +```powershell +$body = @{ + input = "Create a slogan for a new electric SUV that is affordable and fun to drive" + stream = $false +} | ConvertTo-Json + +Invoke-RestMethod -Uri http://localhost:8088/responses -Method Post -Body $body -ContentType "application/json" +``` + +**Bash/curl (Linux/macOS):** + +```bash +curl -sS -H "Content-Type: application/json" -X POST http://localhost:8088/responses \ + -d '{"input": "Create a slogan for a new electric SUV that is affordable and fun to drive","stream":false}' +``` + +The agent will respond with both the Writer's slogan and the Reviewer's feedback. + +Or run in CLI mode for quick testing: + +```bash +python main.py --cli +``` + +## Deploying the Agent to Microsoft Foundry + +To deploy the hosted agent: + +1. Open the VS Code Command Palette and run the `Microsoft Foundry: Deploy Hosted Agent` command. + +2. Follow the interactive deployment prompts. The extension will help you select or create the container files it needs: + - It first looks for a Dockerfile at the repository root. If not found, you can select an existing Dockerfile or generate a new one. + - If you choose to generate a Dockerfile, the extension will place the files at the repo root and open the Dockerfile in the editor; the deployment flow is intentionally cancelled in that case so you can review and edit the generated files before re-running the deploy command. + +3. After deployment completes, the hosted agent appears under the `Hosted Agents (Preview)` section of the extension tree. You can select the agent there to view details and test it using the integrated playground. 
+ +**What the deploy flow does for you:** + +- Creates or obtains an Azure Container Registry for the target project. +- Builds and pushes a container image from your workspace (the build packages the workspace respecting `.dockerignore`). +- Creates an agent version in Microsoft Foundry using the built image. If a `.env` file exists at the workspace root, the extension will parse it and include its key/value pairs as the hosted agent's environment variables in the create request (these variables will be available to the agent runtime). +- Starts the agent container on the project's capability host. If the capability host is not provisioned, the extension will prompt you to enable it and will guide you through creating it. + +### MSI Configuration in the Azure Portal + +This sample requires the Microsoft Foundry Project to authenticate using a Managed Identity when running remotely in Azure. Grant the project's managed identity the required permissions by assigning the built-in [Azure AI User](https://aka.ms/foundry-ext-project-role) role. + +To configure the Managed Identity: + +1. In the Azure Portal, open the Foundry Project. +2. Select "Access control (IAM)" from the left-hand menu. +3. Click "Add" and choose "Add role assignment". +4. In the role selection, search for and select "Azure AI User", then click "Next". +5. For "Assign access to", choose "Managed identity". +6. Click "Select members", locate the managed identity associated with your Foundry Project (you can search by the project name), then click "Select". +7. Click "Review + assign" to complete the assignment. +8. Allow a few minutes for the role assignment to propagate before running the application. 
 + +## Additional Resources + +- [Microsoft Agent Framework](https://learn.microsoft.com/en-us/agent-framework/overview/agent-framework-overview) +- [Managed Identities for Azure Resources](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/) diff --git a/samples/hosted-agent/python/workflow/agent.yaml.tpl b/samples/hosted-agent/python/workflow/agent.yaml.tpl new file mode 100644 index 0000000..054f56e --- /dev/null +++ b/samples/hosted-agent/python/workflow/agent.yaml.tpl @@ -0,0 +1,24 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/microsoft/AgentSchema/refs/heads/main/schemas/v1.0/ContainerAgent.yaml + +kind: hosted +name: {{AgentName}} +description: > + A multi-agent workflow featuring a Writer and Reviewer that collaborate + to create and refine content. +metadata: + authors: + - Microsoft + tags: + - Azure AI AgentServer + - Microsoft Agent Framework + - Multi-Agent Workflow + - Writer-Reviewer + - Content Creation +protocols: + - protocol: responses + version: v1 +environment_variables: + - name: PROJECT_ENDPOINT + value: {{=<% %>=}}"{{PROJECT_ENDPOINT}}"<%={{ }}=%> + - name: MODEL_DEPLOYMENT_NAME + value: {{=<% %>=}}"{{MODEL_DEPLOYMENT_NAME}}"<%={{ }}=%> diff --git a/samples/hosted-agent/python/workflow/main.py b/samples/hosted-agent/python/workflow/main.py new file mode 100644 index 0000000..6365887 --- /dev/null +++ b/samples/hosted-agent/python/workflow/main.py @@ -0,0 +1,107 @@ +import asyncio +import os +import sys +from contextlib import asynccontextmanager + +from agent_framework import WorkflowBuilder +from agent_framework.azure import AzureAIAgentClient +from azure.identity.aio import DefaultAzureCredential, ManagedIdentityCredential +from dotenv import load_dotenv + +load_dotenv(override=True) + +# Configure these for your Foundry project +# Read the explicit variables present in the .env file +PROJECT_ENDPOINT = os.getenv( + "PROJECT_ENDPOINT" +) # e.g., "https://<your-resource>.services.ai.azure.com" 
+MODEL_DEPLOYMENT_NAME = os.getenv( + "MODEL_DEPLOYMENT_NAME", "gpt-4.1-mini" +) # Your model deployment name e.g., "gpt-4.1-mini" + + +def get_credential(): + """Will use Managed Identity when running in Azure, otherwise falls back to DefaultAzureCredential.""" + return ( + ManagedIdentityCredential() + if os.getenv("MSI_ENDPOINT") + else DefaultAzureCredential() + ) + + +@asynccontextmanager +async def create_agents(): + async with ( + get_credential() as credential, + AzureAIAgentClient( + project_endpoint=PROJECT_ENDPOINT, + model_deployment_name=MODEL_DEPLOYMENT_NAME, + credential=credential, + ) as writer_client, + AzureAIAgentClient( + project_endpoint=PROJECT_ENDPOINT, + model_deployment_name=MODEL_DEPLOYMENT_NAME, + credential=credential, + ) as reviewer_client, + ): + writer = writer_client.create_agent( + name="Writer", + instructions="You are an excellent content writer. You create new content and edit contents based on the feedback.", + ) + reviewer = reviewer_client.create_agent( + name="Reviewer", + instructions="You are an excellent content reviewer. Provide actionable feedback to the writer about the provided content in the most concise manner possible.", + ) + yield writer, reviewer + + +def create_workflow(writer, reviewer): + workflow = ( + WorkflowBuilder(name="Writer-Reviewer") + .register_agent(lambda: writer, name="Writer", output_response=True) + .register_agent(lambda: reviewer, name="Reviewer", output_response=True) + .set_start_executor("Writer") + .add_edge("Writer", "Reviewer") + .build() + ) + return workflow.as_agent() + + +async def main() -> None: + """ + The writer and reviewer multi-agent workflow. 
+ + Usage: + python main.py # Run in server mode + + Environment variables required: + - PROJECT_ENDPOINT: Your Microsoft Foundry project endpoint + - MODEL_DEPLOYMENT_NAME: Your Microsoft Foundry model deployment name + """ + + async with create_agents() as (writer, reviewer): + agent = create_workflow(writer, reviewer) + + # Check if running in CLI mode (default is server mode) + if "--cli" in sys.argv: + # CLI mode for testing + print("Running workflow agent in CLI mode...") + + # Test with a sample query + user_message = "Create a slogan for a new electric SUV that is affordable and fun to drive." + print(f"\nUser: {user_message}\n") + + response = await agent.run(user_message) + for msg in response.messages: + if msg.text: + print(f"{msg.author_name}: {msg.text}\n") + else: + # Server mode (default) + print("Starting workflow agent HTTP server...") + from azure.ai.agentserver.agentframework import from_agent_framework + + await from_agent_framework(agent).run_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/samples/hosted-agent/python/workflow/requirements.txt b/samples/hosted-agent/python/workflow/requirements.txt new file mode 100644 index 0000000..e2e7cd2 --- /dev/null +++ b/samples/hosted-agent/python/workflow/requirements.txt @@ -0,0 +1,4 @@ +azure-ai-agentserver-agentframework==1.0.0b14 +debugpy +# Agent development CLI tool (preview) +agent-dev-cli \ No newline at end of file diff --git a/samples/hosted-agent/version-manifest.json b/samples/hosted-agent/version-manifest.json new file mode 100644 index 0000000..c11b813 --- /dev/null +++ b/samples/hosted-agent/version-manifest.json @@ -0,0 +1,21 @@ + +{ + "schemaVersion": "1.0", + "lastUpdated": "2026-02-28T10:00:00Z", + "channels": { + "stable": { + "branch": "hui/workflow-samples", + "sampleVersion": "v1.0.0", + "downloadUrl": "https://github.com/microsoft/foundry-hosted-agent-samples/archive/refs/heads/hui/workflow-samples.zip", + "sha256": "abc123...", + "lastUpdated": 
"2026-02-28T10:00:00Z" + }, + "pre-release": { + "branch": "hui/workflow-samples-pre-release", + "sampleVersion": "v1.1.0", + "downloadUrl": "https://github.com/microsoft/foundry-hosted-agent-samples/archive/refs/heads/hui/workflow-samples-pre-release.zip", + "sha256": "def456...", + "lastUpdated": "2026-02-28T14:30:00Z" + } + } +} diff --git a/samples/workflows/simple_workflow.py b/samples/workflows/simple_workflow.py deleted file mode 100644 index d111849..0000000 --- a/samples/workflows/simple_workflow.py +++ /dev/null @@ -1,177 +0,0 @@ -import asyncio -import os -from dataclasses import dataclass -from uuid import uuid4 - -from agent_framework import ( - AgentRunResponseUpdate, - AgentRunUpdateEvent, - ChatAgent, - ChatMessage, - Executor, - Role, - TextContent, - WorkflowBuilder, - WorkflowContext, - handler, -) -from agent_framework.azure import AzureOpenAIChatClient -from agent_framework.observability import setup_observability -from dotenv import load_dotenv - -load_dotenv() - -# ============================================================================= -# USER CONFIGURATION - SET THESE AS ENVIRONMENT VARIABLES -# ============================================================================= -AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") -AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY") -MODEL_DEPLOYMENT_NAME = os.getenv("MODEL_DEPLOYMENT_NAME") - -# ============================================================================= - - -@dataclass -class StudentResponse: - messages: list[ChatMessage] - - -def create_openai_chat_client(): - """Create OpenAI chat client with explicit settings.""" - - return AzureOpenAIChatClient( - api_key=AZURE_OPENAI_API_KEY, - deployment_name=MODEL_DEPLOYMENT_NAME, - endpoint=AZURE_OPENAI_ENDPOINT, - ) - - -class StudentAgentExecutor(Executor): - agent: ChatAgent - - def __init__(self, agent: ChatAgent, id="student"): - super().__init__(agent=agent, id=id) - self.agent = agent - - @handler - async def 
handle_teacher_question( - self, messages: list[ChatMessage], ctx: WorkflowContext[StudentResponse] - ) -> None: - if messages and "completed" in messages[-1].contents[-1].text.lower(): - await ctx.yield_output( - "šŸŽ‰ Student-teacher conversation completed after 2 turns!" - ) - return - - response = await self.agent.run(messages) - print(f"Student: {response.messages[-1].contents[-1].text}") - - for message in response.messages: - if message.role == Role.ASSISTANT: - await ctx.add_event( - AgentRunUpdateEvent( - self.id, - data=AgentRunResponseUpdate( - contents=[TextContent(text=f"Student: {message.contents[-1].text}")], - role=Role.ASSISTANT, - response_id=str(uuid4()), - ), - ) - ) - - messages.extend(response.messages) - await ctx.send_message(StudentResponse(messages=messages)) - - -class TeacherAgentExecutor(Executor): - agent: ChatAgent - - def __init__(self, agent: ChatAgent, id="teacher"): - super().__init__(agent=agent, id=id) - self.agent = agent - - async def _handle_response(self, messages, ctx, response): - print(f"Teacher: {response.messages[-1].contents[-1].text}") - for message in response.messages: - if message.role == Role.ASSISTANT: - await ctx.add_event( - AgentRunUpdateEvent( - self.id, - data=AgentRunResponseUpdate( - contents=[TextContent(text=f"Teacher: {message.contents[-1].text}")], - role=Role.ASSISTANT, - response_id=str(uuid4()), - ), - ) - ) - messages.extend(response.messages) - await ctx.send_message(messages) - - @handler - async def handle_user_message( - self, messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]] - ) -> None: - response = await self.agent.run(messages) - await self._handle_response(messages, ctx, response) - - @handler - async def handle_student_answer( - self, student_response: StudentResponse, ctx: WorkflowContext[list[ChatMessage]] - ) -> None: - messages = student_response.messages - response = await self.agent.run(messages) - await self._handle_response(messages, ctx, response) - - -def 
create_workflow_from_client(): - """Create workflow using OpenAI chat client with explicit settings.""" - - # Create OpenAI chat client - chat_client = create_openai_chat_client() - - # Create student agent - student_agent = chat_client.create_agent( - instructions="""You are Jamie, a student. Only answer the teacher's questions. For each teacher question, reply with a concise factual answer in one sentence (1-2 sentences max). Do not ask questions, do not add commentary or feedback, and do not provide extra information beyond the direct answer. If the teacher says 'Completed', stop and do not respond further. Keep answers short and precise.""" - ) - - # Create teacher agent - teacher_agent = chat_client.create_agent( - instructions="""You are Dr. Smith, a teacher. Follow this exact pattern: Ask exactly two direct questions total, one at a time. Each question must come from a different academic subject (for example: Geography, Science, History, Math, Literature). Start immediately by asking Question 1 — do NOT ask what topic the student wants or present a list of options. After the student answers Question 1, ask Question 2 from a different subject. After the student answers Question 2, reply with only the single word Completed (capital C, no punctuation, nothing else). Do not give feedback, corrections, explanations, or ask follow-up questions. 
Keep each question short and direct (ideally 3-8 words).""" - ) - - # Create executors - student_executor = StudentAgentExecutor(student_agent) - teacher_executor = TeacherAgentExecutor(teacher_agent) - - workflow = ( - WorkflowBuilder() - .add_edge(teacher_executor, student_executor) - .add_edge(student_executor, teacher_executor) - .set_start_executor(teacher_executor) - .build() - ) - - return workflow - - -async def main(): - """Main function to run the student-teacher workflow.""" - - # Configure observability for workflow visualization - setup_observability(vs_code_extension_port=4319) - - try: - workflow = create_workflow_from_client() - message = ChatMessage( - role=Role.USER, contents=[TextContent("Start the quiz session.")] - ) - async for _ in workflow.run_stream([message]): - pass - - except Exception as e: - print(f"Error running workflow: {e}") - raise - - -if __name__ == "__main__": - asyncio.run(main()) From 9e4d2aea7ef6857acc77f93a30891bd6927e67ad Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:17:00 +0800 Subject: [PATCH 09/21] chore: update upload-artifact action to v4 --- .github/workflows/package-hosted-agents.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/package-hosted-agents.yml b/.github/workflows/package-hosted-agents.yml index 9ec0bb8..c70fa84 100644 --- a/.github/workflows/package-hosted-agents.yml +++ b/.github/workflows/package-hosted-agents.yml @@ -41,7 +41,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload packages as artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: hosted-agents-packages path: samples/hosted-agent/*-hosted-agents.zip From dcc94b94f38f4caec4f6a4505feeaaca72cdcb51 Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:20:10 +0800 Subject: [PATCH 10/21] feat: enable manual triggering of the package hosted agents workflow --- .github/workflows/package-hosted-agents.yml | 1 + 1 
file changed, 1 insertion(+) diff --git a/.github/workflows/package-hosted-agents.yml b/.github/workflows/package-hosted-agents.yml index c70fa84..94faf15 100644 --- a/.github/workflows/package-hosted-agents.yml +++ b/.github/workflows/package-hosted-agents.yml @@ -7,6 +7,7 @@ on: branches: - hui/workflow-samples - main + workflow_dispatch: permissions: contents: write From 42ae22a41b905c835824e3e1f6fa301ed4ec5f0b Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:22:06 +0800 Subject: [PATCH 11/21] fix: correct lastUpdated timestamp for pre-release channel in version manifest --- samples/hosted-agent/version-manifest.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/hosted-agent/version-manifest.json b/samples/hosted-agent/version-manifest.json index c11b813..4d20016 100644 --- a/samples/hosted-agent/version-manifest.json +++ b/samples/hosted-agent/version-manifest.json @@ -15,7 +15,7 @@ "sampleVersion": "v1.1.0", "downloadUrl": "https://github.com/microsoft/foundry-hosted-agent-samples/archive/refs/heads/hui/workflow-samples-pre-release.zip", "sha256": "def456...", - "lastUpdated": "2026-02-28T14:30:00Z" + "lastUpdated": "2026-02-28T10:00:00Z" } } } From cb815cdbc03c3954af9ce5af4785bd6df1851c5b Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:25:38 +0800 Subject: [PATCH 12/21] feat: add version reading and tagging for hosted agent packages --- .github/workflows/package-hosted-agents.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/package-hosted-agents.yml b/.github/workflows/package-hosted-agents.yml index 94faf15..826fddf 100644 --- a/.github/workflows/package-hosted-agents.yml +++ b/.github/workflows/package-hosted-agents.yml @@ -4,6 +4,7 @@ on: push: paths: - "samples/hosted-agent/**" + - ".github/workflows/package-hosted-agents.yml" branches: - hui/workflow-samples - main @@ -19,6 +20,12 @@ jobs: - name: Checkout code uses: actions/checkout@v4 + - name: 
Read version from manifest + id: version + run: | + VERSION=$(jq -r '.channels.stable.sampleVersion' samples/hosted-agent/version-manifest.json) + echo "tag=$VERSION" >> $GITHUB_OUTPUT + - name: Create dotnet agent package run: | cd samples/hosted-agent @@ -34,6 +41,8 @@ jobs: - name: Upload packages to Release uses: softprops/action-gh-release@v1 with: + tag_name: ${{ steps.version.outputs.tag }} + name: Hosted Agents ${{ steps.version.outputs.tag }} files: | samples/hosted-agent/dotnet-hosted-agents.zip samples/hosted-agent/python-hosted-agents.zip From 409da7d1fc4bcd0eefb58d21f2aee5f417765504 Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:36:45 +0800 Subject: [PATCH 13/21] refactor: add workflow for packaging hosted agents with versioning and artifact upload --- ...ge-hosted-agents.yml => package_sample.yml} | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) rename .github/workflows/{package-hosted-agents.yml => package_sample.yml} (75%) diff --git a/.github/workflows/package-hosted-agents.yml b/.github/workflows/package_sample.yml similarity index 75% rename from .github/workflows/package-hosted-agents.yml rename to .github/workflows/package_sample.yml index 826fddf..6351005 100644 --- a/.github/workflows/package-hosted-agents.yml +++ b/.github/workflows/package_sample.yml @@ -4,7 +4,7 @@ on: push: paths: - "samples/hosted-agent/**" - - ".github/workflows/package-hosted-agents.yml" + - ".github/workflows/package-sample.yml" branches: - hui/workflow-samples - main @@ -29,14 +29,14 @@ jobs: - name: Create dotnet agent package run: | cd samples/hosted-agent - zip -r dotnet-hosted-agents.zip dotnet/ - ls -lh dotnet-hosted-agents.zip + zip -r dotnet.zip dotnet/ + ls -lh dotnet.zip - name: Create python agent package run: | cd samples/hosted-agent - zip -r python-hosted-agents.zip python/ - ls -lh python-hosted-agents.zip + zip -r python.zip python/ + ls -lh python.zip - name: Upload packages to Release uses: 
softprops/action-gh-release@v1 @@ -44,8 +44,8 @@ jobs: tag_name: ${{ steps.version.outputs.tag }} name: Hosted Agents ${{ steps.version.outputs.tag }} files: | - samples/hosted-agent/dotnet-hosted-agents.zip - samples/hosted-agent/python-hosted-agents.zip + samples/hosted-agent/dotnet.zip + samples/hosted-agent/python.zip generate_release_notes: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -54,5 +54,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: hosted-agents-packages - path: samples/hosted-agent/*-hosted-agents.zip + path: | + samples/hosted-agent/dotnet.zip + samples/hosted-agent/python.zip retention-days: 30 From c76820ef8d0b5160cad2bbc67e69d137e8c3ce72 Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sat, 28 Feb 2026 18:39:07 +0800 Subject: [PATCH 14/21] fix: correct file path in workflow trigger for package_sample.yml --- .github/workflows/package_sample.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/package_sample.yml b/.github/workflows/package_sample.yml index 6351005..93a95b2 100644 --- a/.github/workflows/package_sample.yml +++ b/.github/workflows/package_sample.yml @@ -4,7 +4,7 @@ on: push: paths: - "samples/hosted-agent/**" - - ".github/workflows/package-sample.yml" + - ".github/workflows/package_sample.yml" branches: - hui/workflow-samples - main From d55dbfa213c4fe86838e3fb592c5d113eac7267e Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Sun, 1 Mar 2026 14:02:24 +0800 Subject: [PATCH 15/21] fix: update download URLs and add language support in version manifest --- samples/hosted-agent/version-manifest.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/hosted-agent/version-manifest.json b/samples/hosted-agent/version-manifest.json index 4d20016..1a70f92 100644 --- a/samples/hosted-agent/version-manifest.json +++ b/samples/hosted-agent/version-manifest.json @@ -6,15 +6,15 @@ "stable": { "branch": "hui/workflow-samples", "sampleVersion": "v1.0.0", - 
"downloadUrl": "https://github.com/microsoft/foundry-hosted-agent-samples/archive/refs/heads/hui/workflow-samples.zip", - "sha256": "abc123...", + "lang": ["dotnet", "python"], + "downloadUrl": "https://github.com/microsoft/ai-foundry-for-vscode/releases/download/v1.0.0/{{lang}}.zip", "lastUpdated": "2026-02-28T10:00:00Z" }, "pre-release": { "branch": "hui/workflow-samples-pre-release", "sampleVersion": "v1.1.0", - "downloadUrl": "https://github.com/microsoft/foundry-hosted-agent-samples/archive/refs/heads/hui/workflow-samples-pre-release.zip", - "sha256": "def456...", + "lang": ["dotnet", "python"], + "downloadUrl": "https://github.com/microsoft/ai-foundry-for-vscode/releases/download/v1.1.0/{{lang}}.zip", "lastUpdated": "2026-02-28T10:00:00Z" } } From a7df7f5a593ddfc629beedc8f97afa9ac7467f2a Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Wed, 4 Mar 2026 13:49:43 +0800 Subject: [PATCH 16/21] fix: remove unnecessary blank line in version manifest --- samples/hosted-agent/version-manifest.json | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/hosted-agent/version-manifest.json b/samples/hosted-agent/version-manifest.json index 1a70f92..62fe48f 100644 --- a/samples/hosted-agent/version-manifest.json +++ b/samples/hosted-agent/version-manifest.json @@ -1,4 +1,3 @@ - { "schemaVersion": "1.0", "lastUpdated": "2026-02-28T10:00:00Z", From d3443906aaedded5ab6023454e0abeaf0f93fdaa Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Wed, 4 Mar 2026 14:04:19 +0800 Subject: [PATCH 17/21] feat: add release workflow for hosted agents with packaging and upload steps --- ...{package_sample.yml => release_sample.yml} | 50 ++++++++++++------- 1 file changed, 33 insertions(+), 17 deletions(-) rename .github/workflows/{package_sample.yml => release_sample.yml} (59%) diff --git a/.github/workflows/package_sample.yml b/.github/workflows/release_sample.yml similarity index 59% rename from .github/workflows/package_sample.yml rename to .github/workflows/release_sample.yml 
index 93a95b2..6ec506a 100644 --- a/.github/workflows/package_sample.yml +++ b/.github/workflows/release_sample.yml @@ -1,21 +1,19 @@ -name: Package Hosted Agents +name: Release Hosted Agents on: push: - paths: - - "samples/hosted-agent/**" - - ".github/workflows/package_sample.yml" branches: - hui/workflow-samples - - main workflow_dispatch: permissions: contents: write jobs: - package-agents: + build: runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.tag }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -38,23 +36,41 @@ jobs: zip -r python.zip python/ ls -lh python.zip + - name: Upload packages as artifacts + uses: actions/upload-artifact@v4 + with: + name: hosted-agents-packages + path: | + samples/hosted-agent/dotnet.zip + samples/hosted-agent/python.zip + retention-days: 30 + + release: + needs: build + runs-on: ubuntu-latest + # ------------------------------------------------------------ + # This environment requires manual approval before proceeding. + # Go to: Settings → Environments → release-approval + # Add the specific GitHub users/teams as "Required reviewers". + # They will receive a notification with an approval link when + # this workflow runs. 
+ # ------------------------------------------------------------ + environment: release-approval + steps: + - name: Download packages + uses: actions/download-artifact@v4 + with: + name: hosted-agents-packages + path: samples/hosted-agent/ + - name: Upload packages to Release uses: softprops/action-gh-release@v1 with: - tag_name: ${{ steps.version.outputs.tag }} - name: Hosted Agents ${{ steps.version.outputs.tag }} + tag_name: ${{ needs.build.outputs.version }} + name: Hosted Agents ${{ needs.build.outputs.version }} files: | samples/hosted-agent/dotnet.zip samples/hosted-agent/python.zip generate_release_notes: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload packages as artifacts - uses: actions/upload-artifact@v4 - with: - name: hosted-agents-packages - path: | - samples/hosted-agent/dotnet.zip - samples/hosted-agent/python.zip - retention-days: 30 From 1ac9c5829c700596d6611aab5307ef65a4a21900 Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Wed, 4 Mar 2026 20:01:57 +0800 Subject: [PATCH 18/21] feat: implement hosted agent workflow with multi-agent collaboration for hotel search --- samples/hosted-agent/dotnet/agent/.env.tpl | 4 ---- .../dotnet/agent/{Dockerfile.tpl => Dockerfile} | 0 .../dotnet/agent/{Program.cs.tpl => Program.cs} | 0 .../dotnet/agent/{agent.yaml.tpl => agent.yaml} | 0 .../dotnet/agent/appsettings.Development.json.tpl | 6 ------ samples/hosted-agent/dotnet/workflow/{.env.tpl => .env} | 0 .../dotnet/workflow/{Dockerfile.tpl => Dockerfile} | 0 .../dotnet/workflow/{Program.cs.tpl => Program.cs} | 0 .../dotnet/workflow/{agent.yaml.tpl => agent.yaml} | 0 ...gs.Development.json.tpl => appsettings.Development.json} | 0 samples/hosted-agent/python/agent/{.env.tpl => .env} | 0 .../python/agent/{agent.yaml.tpl => agent.yaml} | 0 samples/hosted-agent/python/workflow/{.env.tpl => .env} | 0 .../python/workflow/{agent.yaml.tpl => agent.yaml} | 0 14 files changed, 10 deletions(-) delete mode 100644 
samples/hosted-agent/dotnet/agent/.env.tpl rename samples/hosted-agent/dotnet/agent/{Dockerfile.tpl => Dockerfile} (100%) rename samples/hosted-agent/dotnet/agent/{Program.cs.tpl => Program.cs} (100%) rename samples/hosted-agent/dotnet/agent/{agent.yaml.tpl => agent.yaml} (100%) delete mode 100644 samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl rename samples/hosted-agent/dotnet/workflow/{.env.tpl => .env} (100%) rename samples/hosted-agent/dotnet/workflow/{Dockerfile.tpl => Dockerfile} (100%) rename samples/hosted-agent/dotnet/workflow/{Program.cs.tpl => Program.cs} (100%) rename samples/hosted-agent/dotnet/workflow/{agent.yaml.tpl => agent.yaml} (100%) rename samples/hosted-agent/dotnet/workflow/{appsettings.Development.json.tpl => appsettings.Development.json} (100%) rename samples/hosted-agent/python/agent/{.env.tpl => .env} (100%) rename samples/hosted-agent/python/agent/{agent.yaml.tpl => agent.yaml} (100%) rename samples/hosted-agent/python/workflow/{.env.tpl => .env} (100%) rename samples/hosted-agent/python/workflow/{agent.yaml.tpl => agent.yaml} (100%) diff --git a/samples/hosted-agent/dotnet/agent/.env.tpl b/samples/hosted-agent/dotnet/agent/.env.tpl deleted file mode 100644 index 32b9dee..0000000 --- a/samples/hosted-agent/dotnet/agent/.env.tpl +++ /dev/null @@ -1,4 +0,0 @@ -Azure__ProjectEndpoint="{{{AzureAIProjectEndpoint}}}" - -# Replace with your model deployment name, by default using gpt-4.1-mini -Azure__ModelDeploymentName=gpt-4.1-mini diff --git a/samples/hosted-agent/dotnet/agent/Dockerfile.tpl b/samples/hosted-agent/dotnet/agent/Dockerfile similarity index 100% rename from samples/hosted-agent/dotnet/agent/Dockerfile.tpl rename to samples/hosted-agent/dotnet/agent/Dockerfile diff --git a/samples/hosted-agent/dotnet/agent/Program.cs.tpl b/samples/hosted-agent/dotnet/agent/Program.cs similarity index 100% rename from samples/hosted-agent/dotnet/agent/Program.cs.tpl rename to samples/hosted-agent/dotnet/agent/Program.cs diff 
--git a/samples/hosted-agent/dotnet/agent/agent.yaml.tpl b/samples/hosted-agent/dotnet/agent/agent.yaml similarity index 100% rename from samples/hosted-agent/dotnet/agent/agent.yaml.tpl rename to samples/hosted-agent/dotnet/agent/agent.yaml diff --git a/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl b/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl deleted file mode 100644 index 8a9170c..0000000 --- a/samples/hosted-agent/dotnet/agent/appsettings.Development.json.tpl +++ /dev/null @@ -1,6 +0,0 @@ -{ - "Azure": { - "ProjectEndpoint": "{{{AzureAIProjectEndpoint}}}", - "ModelDeploymentName": "gpt-4.1-mini" - } -} diff --git a/samples/hosted-agent/dotnet/workflow/.env.tpl b/samples/hosted-agent/dotnet/workflow/.env similarity index 100% rename from samples/hosted-agent/dotnet/workflow/.env.tpl rename to samples/hosted-agent/dotnet/workflow/.env diff --git a/samples/hosted-agent/dotnet/workflow/Dockerfile.tpl b/samples/hosted-agent/dotnet/workflow/Dockerfile similarity index 100% rename from samples/hosted-agent/dotnet/workflow/Dockerfile.tpl rename to samples/hosted-agent/dotnet/workflow/Dockerfile diff --git a/samples/hosted-agent/dotnet/workflow/Program.cs.tpl b/samples/hosted-agent/dotnet/workflow/Program.cs similarity index 100% rename from samples/hosted-agent/dotnet/workflow/Program.cs.tpl rename to samples/hosted-agent/dotnet/workflow/Program.cs diff --git a/samples/hosted-agent/dotnet/workflow/agent.yaml.tpl b/samples/hosted-agent/dotnet/workflow/agent.yaml similarity index 100% rename from samples/hosted-agent/dotnet/workflow/agent.yaml.tpl rename to samples/hosted-agent/dotnet/workflow/agent.yaml diff --git a/samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl b/samples/hosted-agent/dotnet/workflow/appsettings.Development.json similarity index 100% rename from samples/hosted-agent/dotnet/workflow/appsettings.Development.json.tpl rename to 
samples/hosted-agent/dotnet/workflow/appsettings.Development.json diff --git a/samples/hosted-agent/python/agent/.env.tpl b/samples/hosted-agent/python/agent/.env similarity index 100% rename from samples/hosted-agent/python/agent/.env.tpl rename to samples/hosted-agent/python/agent/.env diff --git a/samples/hosted-agent/python/agent/agent.yaml.tpl b/samples/hosted-agent/python/agent/agent.yaml similarity index 100% rename from samples/hosted-agent/python/agent/agent.yaml.tpl rename to samples/hosted-agent/python/agent/agent.yaml diff --git a/samples/hosted-agent/python/workflow/.env.tpl b/samples/hosted-agent/python/workflow/.env similarity index 100% rename from samples/hosted-agent/python/workflow/.env.tpl rename to samples/hosted-agent/python/workflow/.env diff --git a/samples/hosted-agent/python/workflow/agent.yaml.tpl b/samples/hosted-agent/python/workflow/agent.yaml similarity index 100% rename from samples/hosted-agent/python/workflow/agent.yaml.tpl rename to samples/hosted-agent/python/workflow/agent.yaml From 23067d4da34f2d16ab7bb79bb0a16307c0b62d30 Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Wed, 4 Mar 2026 20:32:36 +0800 Subject: [PATCH 19/21] feat: enhance release workflow to support channel-based versioning and artifact management --- .github/workflows/release_sample.yml | 32 +++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release_sample.yml b/.github/workflows/release_sample.yml index 6ec506a..07880a3 100644 --- a/.github/workflows/release_sample.yml +++ b/.github/workflows/release_sample.yml @@ -1,10 +1,16 @@ name: Release Hosted Agents on: - push: - branches: - - hui/workflow-samples workflow_dispatch: + inputs: + channel: + description: "Release channel" + required: true + type: choice + options: + - stable + - pre-release + default: stable permissions: contents: write @@ -18,11 +24,26 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Read version from manifest + 
- name: Read version and branch from manifest id: version run: | - VERSION=$(jq -r '.channels.stable.sampleVersion' samples/hosted-agent/version-manifest.json) + CHANNEL="${{ github.event.inputs.channel }}" + VERSION=$(jq -r ".channels.\"$CHANNEL\".sampleVersion" samples/hosted-agent/version-manifest.json) + BRANCH=$(jq -r ".channels.\"$CHANNEL\".branch" samples/hosted-agent/version-manifest.json) echo "tag=$VERSION" >> $GITHUB_OUTPUT + echo "branch=$BRANCH" >> $GITHUB_OUTPUT + + - name: Checkout sample branch + uses: actions/checkout@v4 + with: + ref: ${{ steps.version.outputs.branch }} + path: sample-source + + - name: Copy samples from target branch + run: | + rm -rf samples/hosted-agent/dotnet samples/hosted-agent/python + cp -r sample-source/samples/hosted-agent/dotnet samples/hosted-agent/dotnet + cp -r sample-source/samples/hosted-agent/python samples/hosted-agent/python - name: Create dotnet agent package run: | @@ -68,6 +89,7 @@ jobs: with: tag_name: ${{ needs.build.outputs.version }} name: Hosted Agents ${{ needs.build.outputs.version }} + prerelease: ${{ github.event.inputs.channel == 'pre-release' }} files: | samples/hosted-agent/dotnet.zip samples/hosted-agent/python.zip From a0388d62e0e2856cf0d9a0ba3ef01b7a028bb02a Mon Sep 17 00:00:00 2001 From: "huimiu[safe]" Date: Thu, 5 Mar 2026 13:30:00 +0800 Subject: [PATCH 20/21] fix: disable release notes generation in release workflow --- .github/workflows/release_sample.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release_sample.yml b/.github/workflows/release_sample.yml index 07880a3..05d1ab4 100644 --- a/.github/workflows/release_sample.yml +++ b/.github/workflows/release_sample.yml @@ -93,6 +93,6 @@ jobs: files: | samples/hosted-agent/dotnet.zip samples/hosted-agent/python.zip - generate_release_notes: true + generate_release_notes: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 84f9c0e3a4a1f85e41510631bfb8bfa817bb368f Mon Sep 17 00:00:00 2001 From: 
"huimiu[safe]" Date: Thu, 5 Mar 2026 14:06:46 +0800 Subject: [PATCH 21/21] fix: update stable branch in version manifest and remove pre-release section --- samples/hosted-agent/version-manifest.json | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/samples/hosted-agent/version-manifest.json b/samples/hosted-agent/version-manifest.json index 62fe48f..cabf8eb 100644 --- a/samples/hosted-agent/version-manifest.json +++ b/samples/hosted-agent/version-manifest.json @@ -3,18 +3,11 @@ "lastUpdated": "2026-02-28T10:00:00Z", "channels": { "stable": { - "branch": "hui/workflow-samples", + "branch": "stable", "sampleVersion": "v1.0.0", "lang": ["dotnet", "python"], "downloadUrl": "https://github.com/microsoft/ai-foundry-for-vscode/releases/download/v1.0.0/{{lang}}.zip", "lastUpdated": "2026-02-28T10:00:00Z" - }, - "pre-release": { - "branch": "hui/workflow-samples-pre-release", - "sampleVersion": "v1.1.0", - "lang": ["dotnet", "python"], - "downloadUrl": "https://github.com/microsoft/ai-foundry-for-vscode/releases/download/v1.1.0/{{lang}}.zip", - "lastUpdated": "2026-02-28T10:00:00Z" } } }