Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,19 @@ Or one of the above commands can be run to execute the agent with multiple instructio
--backend <agent name>
```

Add the above option to run the agent with agent name. Available agents: `codex`, `opencode`, and `gemini`.
Add the above option to run the agent with agent name. Available agents: `codex`, `opencode`, `gemini`, and `litellm`.

When using `--backend litellm`, the tool will call a LiteLLM-compatible HTTP endpoint (e.g., LM Studio). You can pass a custom base URL or API key either via flags or environment variables:

```bash
agent --backend litellm --model <model name> --litellm-provider openai --litellm-base-url http://localhost:1234/v1 --litellm-api-key <key-if-needed> "<your instructions>"

# or via env vars
LITELLM_PROVIDER=openai LITELLM_API_BASE=http://localhost:1234/v1 LITELLM_API_KEY=<key-if-needed> agent --backend litellm --model <model name> "<your instructions>"
```

Notes for LM Studio / OpenAI-compatible servers:
- Default provider is `openai` and default model is `gpt-3.5-turbo`. Override `--model` with your served model name and keep the `openai` provider prefix for OpenAI-compatible APIs.

```bash
--mode <mode>
Expand Down
85 changes: 80 additions & 5 deletions agent/agent.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
#!/usr/bin/python3

import os
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there any reason we need to split to new import group?

Suggested change
import os
import litellm
import os

import pathlib
import subprocess
import typer
Expand All @@ -9,11 +10,14 @@
from enum import Enum
from typing_extensions import Annotated

import litellm

Comment on lines +13 to +14
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just a nitpick to keep import in one group.

Suggested change
import litellm


class Backend(str, Enum):
    """Agent backends selectable via the ``--backend`` CLI option.

    Each member's value doubles as the CLI-facing string (str mixin), so
    typer can parse ``--backend codex`` etc. directly into this enum.
    """
    codex = 'codex'
    opencode = 'opencode'
    gemini = 'gemini'
    litellm = 'litellm'


class Mode(str, Enum):
Expand All @@ -28,7 +32,27 @@ def run(cwd, *args, **kwargs):
return subprocess.run(args, **kwargs)


def run_agent(cwd, instructions, backend, mode, model):
def run_litellm(instructions, model, base_url, api_key, provider):
    """Send *instructions* to a LiteLLM-compatible endpoint and print the reply.

    Falls back to environment variables (``LITELLM_PROVIDER``,
    ``LITELLM_API_BASE``, ``LITELLM_API_KEY``) and then to defaults suitable
    for a local LM Studio server when the explicit arguments are empty.

    :param instructions: user prompt sent as a single chat message.
    :param model: model name; may already carry a ``provider/`` prefix.
    :param base_url: base URL of the OpenAI-compatible server, if any.
    :param api_key: API key forwarded to the server, if required.
    :param provider: LiteLLM provider prefix (e.g. ``openai``).
    :returns: the assistant message content (str) when available, else None.
    """
    chosen_model = model or 'gpt-3.5-turbo'
    chosen_provider = provider or os.environ.get('LITELLM_PROVIDER') or 'openai'
    # LiteLLM routes requests by "<provider>/<model>"; only add the prefix
    # when the caller did not already qualify the model name.
    if '/' not in chosen_model:
        chosen_model = f'{chosen_provider}/{chosen_model}'
    api_base = (
        base_url
        or os.environ.get('LITELLM_API_BASE')
        or 'http://localhost:1234/v1'  # LM Studio's default local endpoint
    )
    key = api_key or os.environ.get('LITELLM_API_KEY')
    response = litellm.completion(
        model=chosen_model,
        messages=[{'role': 'user', 'content': instructions}],
        api_base=api_base,
        api_key=key,
    )
    if not response.choices:
        return None
    # Attribute access is the documented way to read the reply; ``content``
    # can legitimately be None (e.g. tool-call responses), so guard before
    # printing rather than emitting the literal string "None".
    content = response.choices[0].message.content
    if content is not None:
        print(content)
    return content


def run_agent(cwd, instructions, backend, mode, model, litellm_base_url, litellm_api_key, litellm_provider):
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
def run_agent(cwd, instructions, backend, mode, model, litellm_base_url, litellm_api_key, litellm_provider):
def run_agent(
cwd,
instructions,
backend,
mode,
model,
litellm_base_url,
litellm_api_key,
litellm_provider
):

if backend == 'codex':
cmd_args = ['codex', '--full-auto']
if model:
Expand All @@ -53,14 +77,23 @@ def run_agent(cwd, instructions, backend, mode, model):
else:
cmd_args.extend(['--approval-mode', 'auto_edit'])
cmd_args.append(instructions)
elif backend == 'litellm':
run_litellm(
instructions,
model,
litellm_base_url,
litellm_api_key,
litellm_provider,
)
return
run(cwd, *cmd_args)


def inject_var(command):
    """Return *command* with every ``{module_dir}`` placeholder replaced by ``.``."""
    placeholder = '{module_dir}'
    return command.replace(placeholder, '.')


def run_workflow(workflow_dir, workflow, backend, mode, model):
def run_workflow(workflow_dir, workflow, backend, mode, model, litellm_base_url, litellm_api_key, litellm_provider):
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
def run_workflow(workflow_dir, workflow, backend, mode, model, litellm_base_url, litellm_api_key, litellm_provider):
def run_workflow(
workflow_dir,
workflow,
backend,
mode,
model,
litellm_base_url,
litellm_api_key,
litellm_provider
):

for step in workflow['steps']:
if step.get('ignore', False):
continue
Expand Down Expand Up @@ -96,7 +129,16 @@ def run_workflow(workflow_dir, workflow, backend, mode, model):
instruction += f.read()
instruction += '\n```\n'
if not error:
run_agent(cwd, instruction, backend, mode, model)
run_agent(
cwd,
instruction,
backend,
mode,
model,
litellm_base_url,
litellm_api_key,
litellm_provider,
)


def main(
Expand All @@ -120,6 +162,21 @@ def main(
' instructions is not provided, will override instructions'
' or instructions_file',
)] = '',
litellm_base_url: Annotated[str, typer.Option(
'--litellm-base-url',
envvar='LITELLM_API_BASE',
help='Base URL for LiteLLM-compatible server (e.g., LM Studio)',
)] = '',
litellm_api_key: Annotated[str, typer.Option(
'--litellm-api-key',
envvar='LITELLM_API_KEY',
help='API key to forward to the LiteLLM-compatible server if required',
)] = '',
litellm_provider: Annotated[str, typer.Option(
'--litellm-provider',
envvar='LITELLM_PROVIDER',
help='LiteLLM provider prefix (e.g., openai, ollama, together)',
)] = '',
):
assert instructions_file or instructions or workflow, (
'Either instructions_file or instructions or workflow must be provided.'
Expand All @@ -128,12 +185,30 @@ def main(
workflow_dir = pathlib.Path(workflow).expanduser().resolve().parent
with open(workflow, 'r') as file:
workflow = yaml.safe_load(file)[0]
run_workflow(workflow_dir, workflow, backend, mode, model)
run_workflow(
workflow_dir,
workflow,
backend,
mode,
model,
litellm_base_url,
litellm_api_key,
litellm_provider,
)
else:
if instructions_file:
with open(instructions_file, 'r') as file:
instructions = file.read()
run_agent('.', instructions, backend, mode, model)
run_agent(
'.',
instructions,
backend,
mode,
model,
litellm_base_url,
litellm_api_key,
litellm_provider,
)


def cli():
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@ authors = [{name = "Hai Lang", email = "hailn@trobz.com"}]
requires-python = ">=3.10"
readme = "README.md"
dependencies = [
"typer>=0.19.2"
"typer>=0.19.2",
"litellm>=1.52.0",
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please help to bump project version in line no. 3 to 0.3.0.

]

[project.urls]
Expand Down Expand Up @@ -35,4 +36,3 @@ build-backend = "uv_build"
module-name = "agent"
module-root = ""


1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
typer
litellm>=1.52.0