diff --git a/common/py/anthropic.py b/common/py/anthropic.py
index 16d1140..ed7e737 100644
--- a/common/py/anthropic.py
+++ b/common/py/anthropic.py
@@ -1,16 +1,16 @@
 from agentuity import AgentRequest, AgentResponse, AgentContext
-from anthropic import Anthropic
+from anthropic import AsyncAnthropic
 
-client = Anthropic()
+client = AsyncAnthropic()
 
 
 async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
-    result = client.messages.create(
+    result = await client.messages.create(
         max_tokens=1024,
         messages=[
             {
                 "role": "user",
-                "content": request.data.text or "Hello, Claude",
+                "content": await request.data.text() or "Hello, Claude",
             }
         ],
         model="claude-3-5-sonnet-latest",
diff --git a/common/py/crewai/agent.py b/common/py/crewai/agent.py
index 5539ba9..4b13986 100644
--- a/common/py/crewai/agent.py
+++ b/common/py/crewai/agent.py
@@ -3,6 +3,7 @@
 
 
 async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
-    inputs = {"topic": request.data.text or "AI LLMs"}
-    result = MyCrew().crew().kickoff(inputs=inputs)
+    inputs = {"topic": await request.data.text() or "AI LLMs"}
+    crew = MyCrew().crew()
+    result = await crew.kickoff_async(inputs=inputs)
     return response.text(str(result))
diff --git a/common/py/langchain/openai.py b/common/py/langchain/openai.py
index 8633ec9..68708c7 100644
--- a/common/py/langchain/openai.py
+++ b/common/py/langchain/openai.py
@@ -18,6 +18,6 @@ async def run(request: AgentRequest, response: AgentResponse, context: AgentCont
     )
     output_parser = StrOutputParser()
     chain = prompt | llm | output_parser
-    result = chain.invoke({"input": request.data.text})
+    result = await chain.ainvoke({"input": await request.data.text() or "Tell me about AI"})
     return response.text(result)
 
diff --git a/common/py/litellm.py b/common/py/litellm.py
index 82e351f..20adc20 100644
--- a/common/py/litellm.py
+++ b/common/py/litellm.py
@@ -1,8 +1,8 @@
-from litellm import completion
+from litellm import acompletion
 from agentuity import AgentRequest, AgentResponse, AgentContext
 
 
 async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
-    messages = [{"content": request.data.text or "Hello, how are you?", "role": "user"}]
-    result = completion(model="openai/gpt-4o", messages=messages)
+    messages = [{"content": await request.data.text() or "Hello, how are you?", "role": "user"}]
+    result = await acompletion(model="openai/gpt-4o", messages=messages)
     return response.text(result.choices[0].message.content)
diff --git a/common/py/llamaindex/openai.py b/common/py/llamaindex/openai.py
index 5ff18ae..4f2a96b 100644
--- a/common/py/llamaindex/openai.py
+++ b/common/py/llamaindex/openai.py
@@ -18,5 +18,5 @@ def multiply(a: float, b: float) -> float:
 
 
 async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
-    result = await agent.run(request.data.text or "What is 1234 * 4567?")
+    result = await agent.run(await request.data.text() or "What is 1234 * 4567?")
     return response.text(str(result))
diff --git a/common/py/openai.py b/common/py/openai.py
index d68080d..0aaf6df 100644
--- a/common/py/openai.py
+++ b/common/py/openai.py
@@ -1,11 +1,11 @@
-from openai import OpenAI
+from openai import AsyncOpenAI
 from agentuity import AgentRequest, AgentResponse, AgentContext
 
-client = OpenAI()
+client = AsyncOpenAI()
 
 
 async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
-    chat_completion = client.chat.completions.create(
+    chat_completion = await client.chat.completions.create(
         messages=[
             {
                 "role": "system",
@@ -13,7 +13,7 @@ async def run(request: AgentRequest, response: AgentResponse, context: AgentCont
             },
             {
                 "role": "user",
-                "content": request.data.text or "Why is the sky blue?",
+                "content": await request.data.text() or "Why is the sky blue?",
             },
         ],
         model="gpt-4o",
diff --git a/python-uv/templates.yaml b/python-uv/templates.yaml
index 06c5965..4ab5ba1 100644
--- a/python-uv/templates.yaml
+++ b/python-uv/templates.yaml
@@ -52,7 +52,8 @@
       args:
         - add
         - --quiet
-        - llama-index
+        - llama-index==0.12.33
+        - llama-index-core==0.12.33
   - action: create_file
     filename: "agents/{{ .AgentName | safe_filename }}/agent.py"
     from: "common/py/llamaindex/openai.py"