Skip to content
This repository was archived by the owner on Jan 23, 2026. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions common/py/anthropic.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
from agentuity import AgentRequest, AgentResponse, AgentContext
from anthropic import Anthropic
from anthropic import AsyncAnthropic

client = Anthropic()
client = AsyncAnthropic()


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
result = client.messages.create(
result = await client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": request.data.text or "Hello, Claude",
"content": await request.data.text() or "Hello, Claude",
}
],
model="claude-3-5-sonnet-latest",
Expand Down
5 changes: 3 additions & 2 deletions common/py/crewai/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    """Run the CrewAI crew on the request text and return its result as text.

    Uses the incoming request's text as the crew topic, falling back to
    "AI LLMs" when the request carries no text, and awaits the crew's
    async kickoff so the event loop is not blocked.
    """
    # request.data.text() is awaitable; empty/None text falls back to the default topic.
    inputs = {"topic": await request.data.text() or "AI LLMs"}
    crew = MyCrew().crew()
    # kickoff_async is the non-blocking counterpart of Crew.kickoff.
    result = await crew.kickoff_async(inputs=inputs)
    return response.text(str(result))
2 changes: 1 addition & 1 deletion common/py/langchain/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,6 @@ async def run(request: AgentRequest, response: AgentResponse, context: AgentCont
)
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
result = chain.invoke({"input": request.data.text})
result = await chain.ainvoke({"input": await request.data.text() or "Tell me about AI"})

return response.text(result)
6 changes: 3 additions & 3 deletions common/py/litellm.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from litellm import acompletion
from agentuity import AgentRequest, AgentResponse, AgentContext


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    """Send the request text to GPT-4o via LiteLLM and reply with the completion text.

    Falls back to a default prompt when the incoming request has no text.
    """
    # request.data.text() is awaitable; empty/None text falls back to the default prompt.
    messages = [{"content": await request.data.text() or "Hello, how are you?", "role": "user"}]
    # acompletion is the async counterpart of litellm.completion.
    result = await acompletion(model="openai/gpt-4o", messages=messages)
    return response.text(result.choices[0].message.content)
2 changes: 1 addition & 1 deletion common/py/llamaindex/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,5 @@ def multiply(a: float, b: float) -> float:


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    """Run the module-level LlamaIndex agent on the request text.

    Uses a default arithmetic question when the request carries no text,
    and returns the agent's result rendered as text.
    """
    # request.data.text() is awaitable; empty/None text falls back to the default question.
    result = await agent.run(await request.data.text() or "What is 1234 * 4567?")
    return response.text(str(result))
8 changes: 4 additions & 4 deletions common/py/openai.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
from openai import OpenAI
from openai import AsyncOpenAI
from agentuity import AgentRequest, AgentResponse, AgentContext

client = OpenAI()
client = AsyncOpenAI()


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
chat_completion = client.chat.completions.create(
chat_completion = await client.chat.completions.create(
messages=[
{
"role": "system",
"content": "You are a friendly assistant!",
},
{
"role": "user",
"content": request.data.text or "Why is the sky blue?",
"content": await request.data.text() or "Why is the sky blue?",
},
],
model="gpt-4o",
Expand Down
3 changes: 2 additions & 1 deletion python-uv/templates.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,8 @@
args:
- add
- --quiet
- llama-index==0.12.33
- llama-index-core==0.12.33
- action: create_file
filename: "agents/{{ .AgentName | safe_filename }}/agent.py"
from: "common/py/llamaindex/openai.py"
Expand Down