Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions .env

This file was deleted.

2 changes: 2 additions & 0 deletions .env.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Copy this to .env and set any necessary secrets
OPENAI_API_KEY=sk-your-openai-api-key-here
69 changes: 69 additions & 0 deletions .github/workflows/check-regeneration.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# CI for the copier template repository:
#   - check-template: verifies the committed test-proj matches a fresh regeneration
#   - check-python:   runs the Python format/lint/test suite inside test-proj
#   - check-ui:       runs the UI checks inside test-proj/ui
name: Check Template Regeneration

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  check-template:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'

      - name: Install uv
        uses: astral-sh/setup-uv@v3

      - name: Run regeneration check
        run: uv run copier/copy_utils.py check-regeneration

  check-python:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'

      - name: Install uv
        uses: astral-sh/setup-uv@v3

      - name: Run Python checks
        run: uv run hatch run all-check
        working-directory: test-proj

  check-ui:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '24'

      - name: Enable Corepack
        run: corepack enable

      # NOTE(review): `corepack prepare --activate` with no spec activates the
      # packageManager pinned in package.json — confirm test-proj/ui pins one.
      - name: Activate pnpm version
        working-directory: test-proj/ui
        run: corepack prepare --activate

      - name: Run UI checks
        run: pnpm run all-check
        working-directory: test-proj/ui
4 changes: 4 additions & 0 deletions .gitignore.jinja
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
.env
__pycache__
workflows.db
.venv
20 changes: 4 additions & 16 deletions copier.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,33 +8,21 @@ project_name:
Project name must contain only letters, numbers, and dashes
{% endif %}

llama_project_id:
type: str
help: What is your Llama Cloud project ID?
default: ""
required: true

llama_org_id:
project_title:
type: str
help: What is your Llama Cloud organization ID?
default: ""
required: true
help: What is the title of your project? This will be used in the UI Title Bar.
default: "{{ project_name.replace('-', ' ').title() }}"

# computed variables
project_name_snake:
type: str
default: "{{ project_name.replace('-', '_') }}"
when: false

project_title:
type: str
default: "{{ (project_name.replace('-', ' '))[:1] | upper ~ (project_name.replace('-', ' '))[1:] }}"
when: false

_exclude:
- "test-proj"
- ".git"
- ".github"
- "copier"
- "CONTRIBUTING.md"
- "copier.yaml"
- "copier.yaml"
1 change: 1 addition & 0 deletions copier/copy_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ def run_copier_quietly(src_path: str, dst_path: str, data: Dict[str, str]) -> No
data=data,
unsafe=True,
quiet=True,
vcs_ref="HEAD",
)


Expand Down
8 changes: 0 additions & 8 deletions llama_deploy.yaml

This file was deleted.

8 changes: 0 additions & 8 deletions llama_deploy.yaml.jinja

This file was deleted.

25 changes: 0 additions & 25 deletions pyproject.toml

This file was deleted.

46 changes: 34 additions & 12 deletions pyproject.toml.jinja
Original file line number Diff line number Diff line change
@@ -1,26 +1,48 @@
[project]
name = "{{project_name_snake}}"
name = "{{ project_name_snake }}"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
authors = [
{ name = "Terry Zhao", email = "terry@runllama.ai" }
]
authors = []
requires-python = ">=3.12"
dependencies = [
"llama-index-workflows>=2.2.0",
"python-cowsay>=1.2.1",
"llama-cloud-services>=0.6.0",
"llama-index-core>=0.12.0",
"llama-index-llms-openai>=0.3.0",
"llama-index-embeddings-openai>=0.3.0",
"python-dotenv>=1.0.1",
"llama-index-workflows>=2.2.0,<3.0.0",
"llama-cloud-services>=0.6.68",
"llama-index-core>=0.14.0",
"llama-index-llms-openai>=0.5.6",
"llama-index-embeddings-openai>=0.5.1",
"python-dotenv>=1.1.1",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[dependency-groups]
dev = []
dev = [
"hatch>=1.14.1",
"pytest>=8.4.2",
"ruff>=0.13.0",
"ty>=0.0.1a20",
]

[tool.hatch.envs.default.scripts]
"format" = "ruff format ."
"format-check" = "ruff format --check ."
"lint" = "ruff check --fix ."
"lint-check" = ["ruff check ."]
typecheck = "ty check src"
test = "pytest"
"all-check" = ["format-check", "lint-check", "test"]
"all-fix" = ["format", "lint", "test"]

[tool.llamadeploy]
env-files = [".env"]
llama_cloud = true

[tool.llamadeploy.ui]
directory = "./ui"

[tool.llamadeploy.workflows]
upload = "test_proj.qa_workflows:upload"
chat = "test_proj.qa_workflows:chat"
52 changes: 52 additions & 0 deletions src/{{ project_name_snake }}/clients.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import functools
import os
import httpx

from llama_cloud.client import AsyncLlamaCloud
from llama_cloud_services import LlamaParse

# Deployed agents may infer their name from the deployment name.
# Note: make sure an agent deployment with this name actually exists,
# otherwise calls to get or set data will fail. You may need to adjust
# this value (e.g. via a fallback) for local development.
DEPLOYMENT_NAME: str | None = os.getenv("LLAMA_DEPLOY_DEPLOYMENT_NAME")
# Required for all Llama Cloud calls; raises KeyError at import time if unset.
LLAMA_CLOUD_API_KEY: str = os.environ["LLAMA_CLOUD_API_KEY"]
# Set this when running against a different environment than production.
LLAMA_CLOUD_BASE_URL: str | None = os.getenv("LLAMA_CLOUD_BASE_URL")
# Project scoping for cloud requests; attached as a header when present.
LLAMA_CLOUD_PROJECT_ID: str | None = os.getenv("LLAMA_DEPLOY_PROJECT_ID")
# Name of the Llama Cloud index used by this app's workflows.
INDEX_NAME = "document_qa_index"


def get_custom_client() -> httpx.AsyncClient:
    """Create an async HTTP client for Llama Cloud calls.

    Uses a 60-second timeout and, when a project ID is configured,
    scopes every request to it via the ``Project-Id`` header.
    """
    scoped_headers = None
    if LLAMA_CLOUD_PROJECT_ID:
        scoped_headers = {"Project-Id": LLAMA_CLOUD_PROJECT_ID}
    return httpx.AsyncClient(timeout=60, headers=scoped_headers)


@functools.cache
def get_llama_cloud_client() -> AsyncLlamaCloud:
    """Return a process-wide, lazily constructed Llama Cloud API client.

    Cached so every caller shares one client (and its underlying
    connection pool) configured from the module-level environment values.
    """
    http_client = get_custom_client()
    client = AsyncLlamaCloud(
        base_url=LLAMA_CLOUD_BASE_URL,
        token=LLAMA_CLOUD_API_KEY,
        httpx_client=http_client,
    )
    return client


@functools.cache
def get_llama_parse_client() -> LlamaParse:
    """Return a process-wide, lazily constructed LlamaParse client.

    Configured for agentic page parsing with markdown output; cached so
    the parser (and its HTTP client) is built once per process.
    """
    # Parsing behaviour knobs, kept together for readability.
    parse_options = {
        "parse_mode": "parse_page_with_agent",
        "model": "openai-gpt-4-1-mini",
        "high_res_ocr": True,
        "adaptive_long_table": True,
        "outlined_table_extraction": True,
        "output_tables_as_HTML": True,
        "result_type": "markdown",
    }
    return LlamaParse(
        api_key=LLAMA_CLOUD_API_KEY,
        project_id=LLAMA_CLOUD_PROJECT_ID,
        custom_client=get_custom_client(),
        **parse_options,
    )
Loading
Loading