Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 23 additions & 24 deletions clai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -54,28 +54,27 @@ Either way, running `clai` will start an interactive session where you can chat
## Help

```
usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]

Pydantic AI CLI v...

Special prompts:
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
* `/markdown` - show the last markdown output of the last question
* `/multiline` - toggle multiline mode
* `/cp` - copy the last response to clipboard

positional arguments:
prompt AI Prompt, if omitted fall into interactive mode

options:
-h, --help show this help message and exit
-m [MODEL], --model [MODEL]
Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
-a AGENT, --agent AGENT
Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
-l, --list-models List all available models and exit
-t [CODE_THEME], --code-theme [CODE_THEME]
Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.
--no-stream Disable streaming from the model
--version Show version and exit
Usage: clai [OPTIONS] [PROMPT]

Pydantic AI CLI v...

Special prompts: * `/exit` - exit the interactive mode (ctrl-c and ctrl-d
also work) * `/markdown` - show the last markdown output of the last
question * `/multiline` - toggle multiline mode * `/cp` - copy the last
response to clipboard

Options:
-m, --model MODEL Model to use, in format "<provider>:<model>" e.g.
"openai:gpt-4.1" or "anthropic:claude-sonnet-4-0".
Defaults to "openai:gpt-4.1".
-a, --agent MODULE:VAR Custom Agent to use, in format "module:variable",
e.g. "mymodule.submodule:my_agent"
-l, --list-models List all available models and exit
-t, --code-theme THEME Which colors to use for code, can be "dark", "light"
or any theme from pygments.org/styles/. Defaults to
"dark" which works well on dark terminals.
[default: dark]
--no-stream Disable streaming from the model
--version Show version and exit
-h, --help Show this message and exit.
```
2 changes: 1 addition & 1 deletion docs/install.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ pip/uv-add "pydantic-ai-slim[openai]"
* `huggingface` - installs `huggingface-hub[inference]` [PyPI ↗](https://pypi.org/project/huggingface-hub){:target="_blank"}
* `duckduckgo` - installs `ddgs` [PyPI ↗](https://pypi.org/project/ddgs){:target="_blank"}
* `tavily` - installs `tavily-python` [PyPI ↗](https://pypi.org/project/tavily-python){:target="_blank"}
* `cli` - installs `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `argcomplete` [PyPI ↗](https://pypi.org/project/argcomplete){:target="_blank"}
* `cli` - installs `rich` [PyPI ↗](https://pypi.org/project/rich){:target="_blank"}, `prompt-toolkit` [PyPI ↗](https://pypi.org/project/prompt-toolkit){:target="_blank"}, and `pyperclip` [PyPI ↗](https://pypi.org/project/pyperclip){:target="_blank"}
* `mcp` - installs `mcp` [PyPI ↗](https://pypi.org/project/mcp){:target="_blank"}
* `a2a` - installs `fasta2a` [PyPI ↗](https://pypi.org/project/fasta2a){:target="_blank"}
* `ag-ui` - installs `ag-ui-protocol` [PyPI ↗](https://pypi.org/project/ag-ui-protocol){:target="_blank"} and `starlette` [PyPI ↗](https://pypi.org/project/starlette){:target="_blank"}
Expand Down
221 changes: 135 additions & 86 deletions pydantic_ai_slim/pydantic_ai/_cli.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from __future__ import annotations as _annotations

import argparse
import asyncio
import importlib
import os
Expand All @@ -12,6 +11,7 @@
from pathlib import Path
from typing import Any, cast

import click
from typing_inspection.introspection import get_literal_values

from . import __version__
Expand All @@ -23,7 +23,6 @@
from .output import OutputDataT

try:
import argcomplete
import pyperclip
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory, Suggestion
Expand All @@ -39,7 +38,7 @@
from rich.text import Text
except ImportError as _import_error:
raise ImportError(
'Please install `rich`, `prompt-toolkit`, `pyperclip` and `argcomplete` to use the Pydantic AI CLI, '
'Please install `rich`, `prompt-toolkit`, and `pyperclip` to use the Pydantic AI CLI, '
'you can use the `cli` optional group — `pip install "pydantic-ai-slim[cli]"`'
) from _import_error

Expand Down Expand Up @@ -82,6 +81,12 @@ def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderR
heading_open=LeftHeading,
)

_DEFAULT_CLI_MODEL = 'openai:gpt-4.1'
_MODEL_OPTION_HELP_TEMPLATE = (
'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or '
'"anthropic:claude-sonnet-4-0". Defaults to "{default_model}".'
)
_CLI_CONTEXT_DEFAULTS_KEY = 'default_model'

cli_agent = Agent()

Expand All @@ -97,129 +102,173 @@ def cli_system_prompt() -> str:
The user is running {sys.platform}."""


def cli_exit(prog_name: str = 'pai'): # pragma: no cover
"""Run the CLI and exit."""
sys.exit(cli(prog_name=prog_name))

@click.command(
context_settings={
'help_option_names': ['-h', '--help'],
},
help=(
f'Pydantic AI CLI v{__version__}\n\n'
'Special prompts:\n'
'* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)\n'
'* `/markdown` - show the last markdown output of the last question\n'
'* `/multiline` - toggle multiline mode\n'
'* `/cp` - copy the last response to clipboard\n'
),
)
@click.argument('prompt', required=False)
@click.option(
'-m',
'--model',
metavar='MODEL',
help=_MODEL_OPTION_HELP_TEMPLATE.format(default_model=_DEFAULT_CLI_MODEL),
)
@click.option(
'-a',
'--agent',
metavar='MODULE:VAR',
help=('Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"'),
)
@click.option('-l', '--list-models', is_flag=True, help='List all available models and exit')
@click.option(
'-t',
'--code-theme',
default='dark',
metavar='THEME',
help=(
'Which colors to use for code, can be "dark", "light" or any theme from '
'pygments.org/styles/. Defaults to "dark" which works well on dark terminals.'
),
show_default=True,
)
@click.option('--no-stream', is_flag=True, help='Disable streaming from the model')
@click.option('--version', is_flag=True, help='Show version and exit')
@click.pass_context
def _click_main( # noqa: C901
ctx: click.Context,
prompt: str | None,
model: str | None,
agent: str | None,
list_models: bool,
code_theme: str,
no_stream: bool,
version: bool,
) -> int | None:
"""Command body (invoked by Click)."""
context_defaults: dict[str, Any] = ctx.obj or {}
default_model = context_defaults.get(_CLI_CONTEXT_DEFAULTS_KEY, _DEFAULT_CLI_MODEL)
prog_name = ctx.info_name or 'pai'

def cli( # noqa: C901
args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = 'openai:gpt-4.1'
) -> int:
"""Run the CLI and return the exit code for the process."""
parser = argparse.ArgumentParser(
prog=prog_name,
description=f"""\
Pydantic AI CLI v{__version__}\n\n

Special prompts:
* `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
* `/markdown` - show the last markdown output of the last question
* `/multiline` - toggle multiline mode
* `/cp` - copy the last response to clipboard
""",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('prompt', nargs='?', help='AI Prompt, if omitted fall into interactive mode')
arg = parser.add_argument(
'-m',
'--model',
nargs='?',
help=f'Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "{default_model}".',
)
# we don't want to autocomplete or list models that don't include the provider,
# e.g. we want to show `openai:gpt-4o` but not `gpt-4o`
qualified_model_names = [n for n in get_literal_values(KnownModelName.__value__) if ':' in n]
arg.completer = argcomplete.ChoicesCompleter(qualified_model_names) # type: ignore[reportPrivateUsage]
parser.add_argument(
'-a',
'--agent',
help='Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"',
)
parser.add_argument(
'-l',
'--list-models',
action='store_true',
help='List all available models and exit',
)
parser.add_argument(
'-t',
'--code-theme',
nargs='?',
help='Which colors to use for code, can be "dark", "light" or any theme from pygments.org/styles/. Defaults to "dark" which works well on dark terminals.',
default='dark',
)
parser.add_argument('--no-stream', action='store_true', help='Disable streaming from the model')
parser.add_argument('--version', action='store_true', help='Show version and exit')

argcomplete.autocomplete(parser)
args = parser.parse_args(args_list)

console = Console()
name_version = f'[green]{prog_name} - Pydantic AI CLI v{__version__}[/green]'
if args.version:
if version:
console.print(name_version, highlight=False)
return 0
if args.list_models:
if list_models:
console.print(f'{name_version}\n\n[green]Available models:[/green]')
for model in qualified_model_names:
console.print(f' {model}', highlight=False)
for m in qualified_model_names:
console.print(f' {m}', highlight=False)
return 0

agent: Agent[None, str] = cli_agent
if args.agent:
agent_obj: Agent[None, str] = cli_agent
if agent:
sys.path.append(os.getcwd())
try:
module_path, variable_name = args.agent.split(':')
module_path, variable_name = agent.split(':')
except ValueError:
console.print('[red]Error: Agent must be specified in "module:variable" format[/red]')
return 1
raise click.exceptions.Exit(1)

module = importlib.import_module(module_path)
agent = getattr(module, variable_name)
if not isinstance(agent, Agent):
console.print(f'[red]Error: {args.agent} is not an Agent instance[/red]')
return 1
agent_obj = getattr(module, variable_name)
if not isinstance(agent_obj, Agent):
console.print(f'[red]Error: {agent} is not an Agent instance[/red]')
raise click.exceptions.Exit(1)

model_arg_set = args.model is not None
if agent.model is None or model_arg_set:
model_arg_set = model is not None
if agent_obj.model is None or model_arg_set:
try:
agent.model = infer_model(args.model or default_model)
agent_obj.model = infer_model(model or default_model)
except UserError as e:
console.print(f'Error initializing [magenta]{args.model}[/magenta]:\n[red]{e}[/red]')
return 1
console.print(f'Error initializing [magenta]{model}[/magenta]:\n[red]{e}[/red]')
raise click.exceptions.Exit(1)

model_name = agent.model if isinstance(agent.model, str) else f'{agent.model.system}:{agent.model.model_name}'
if args.agent and model_arg_set:
model_name = (
agent_obj.model
if isinstance(agent_obj.model, str)
else f'{agent_obj.model.system}:{agent_obj.model.model_name}'
)
if agent and model_arg_set:
console.print(
f'{name_version} using custom agent [magenta]{args.agent}[/magenta] with [magenta]{model_name}[/magenta]',
f'{name_version} using custom agent [magenta]{agent}[/magenta] with [magenta]{model_name}[/magenta]',
highlight=False,
)
elif args.agent:
console.print(f'{name_version} using custom agent [magenta]{args.agent}[/magenta]', highlight=False)
elif agent:
console.print(f'{name_version} using custom agent [magenta]{agent}[/magenta]', highlight=False)
else:
console.print(f'{name_version} with [magenta]{model_name}[/magenta]', highlight=False)

stream = not args.no_stream
if args.code_theme == 'light':
code_theme = 'default'
elif args.code_theme == 'dark':
code_theme = 'monokai'
stream = not no_stream
if code_theme == 'light':
code_theme_name = 'default'
elif code_theme == 'dark':
code_theme_name = 'monokai'
else:
code_theme = args.code_theme # pragma: no cover
code_theme_name = code_theme # pragma: no cover

if prompt := cast(str, args.prompt):
if prompt:
try:
asyncio.run(ask_agent(agent, prompt, stream, console, code_theme))
asyncio.run(ask_agent(agent_obj, prompt, stream, console, code_theme_name))
except KeyboardInterrupt:
pass
return 0

try:
return asyncio.run(run_chat(stream, agent, console, code_theme, prog_name))
return asyncio.run(run_chat(stream, agent_obj, console, code_theme_name, prog_name))
except KeyboardInterrupt: # pragma: no cover
return 0


_MODEL_OPTION = cast(click.Option, next(param for param in _click_main.params if param.name == 'model'))


def cli_exit(prog_name: str = 'pai'):  # pragma: no cover
    """Console-script entry point: run the CLI and terminate the process with its exit code."""
    exit_code = cli(prog_name=prog_name)
    sys.exit(exit_code)


def cli(
    args_list: Sequence[str] | None = None, *, prog_name: str = 'pai', default_model: str = _DEFAULT_CLI_MODEL
) -> int:
    """Run the CLI and return the exit code for the process.

    Uses Click for parsing, while preserving the previous argparse-based API:
    - `args_list=None` means "parse `sys.argv`", as `argparse.parse_args(None)` did.
    - Raises SystemExit on `-h`/`--help` to satisfy the README hook test.
    - Returns an int exit code for other invocations.

    Args:
        args_list: Arguments to parse; `None` falls back to `sys.argv[1:]`.
        prog_name: Program name shown in help/version output.
        default_model: Model used when `-m/--model` is not given; also shown in help.
    """
    # BUG FIX: `args_list or []` treated None as an empty argv, so the
    # console-script path (cli_exit -> cli()) silently ignored all real
    # command-line arguments. Preserve argparse's None => sys.argv semantics.
    args = sys.argv[1:] if args_list is None else list(args_list)

    # Keep the option help text in sync with the caller-provided default model.
    _MODEL_OPTION.help = _MODEL_OPTION_HELP_TEMPLATE.format(default_model=default_model)

    context_defaults = {_CLI_CONTEXT_DEFAULTS_KEY: default_model}

    if any(a in ('-h', '--help') for a in args):
        # standalone_mode lets Click print help and raise SystemExit itself,
        # so help propagates as SystemExit instead of being converted below.
        _click_main.main(args=args, prog_name=prog_name, standalone_mode=True, obj=context_defaults)

    try:
        _click_main.main(args=args, prog_name=prog_name, standalone_mode=True, obj=context_defaults)
    except SystemExit as e:
        code = e.code
        if isinstance(code, int):
            return code
        # Click may exit with None (success) or a message string (failure).
        return 0 if code is None else 1  # pragma: no cover
    return 0  # pragma: no cover


async def run_chat(
stream: bool,
agent: AbstractAgent[AgentDepsT, OutputDataT],
Expand Down
2 changes: 1 addition & 1 deletion pydantic_ai_slim/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,8 @@ tavily = ["tavily-python>=0.5.0"]
cli = [
"rich>=13",
"prompt-toolkit>=3",
"argcomplete>=3.5.0",
"pyperclip>=1.9.0",
"click>=8.3.0",
]
# MCP
mcp = ["mcp>=1.12.3"]
Expand Down
9 changes: 9 additions & 0 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,15 @@ def test_list_models(capfd: CaptureFixture[str]):
assert models == set(), models


def test_help_uses_overridden_default_model(capfd: CaptureFixture[str]):
    # The -m/--model help text is templated on the `default_model` argument of
    # `cli()`; confirm a caller-supplied default shows up in `--help` output.
    custom_default = 'anthropic:claude-3-5-sonnet'
    # Click's standalone mode prints help and raises SystemExit(0).
    with pytest.raises(SystemExit) as exc:
        cli(['--help'], default_model=custom_default)
    assert exc.value.code == 0
    out = capfd.readouterr().out
    assert f'Defaults to "{custom_default}".' in out


def test_cli_prompt(capfd: CaptureFixture[str], env: TestEnv):
env.set('OPENAI_API_KEY', 'test')
with cli_agent.override(model=TestModel(custom_output_text='# result\n\n```py\nx = 1\n```')):
Expand Down