From 45ac03ff86dfdf4c30827d498d4f9459191704c9 Mon Sep 17 00:00:00 2001
From: Adrian Lyjak
Date: Thu, 18 Sep 2025 12:57:45 -0400
Subject: [PATCH 1/8] adding test-proj

---
 .env                                          |   3 -
 .gitignore.jinja                              |   4 +
 copier.yaml                                   |  20 +-
 llama_deploy.yaml                             |   8 -
 llama_deploy.yaml.jinja                       |   8 -
 pyproject.toml                                |  25 -
 pyproject.toml.jinja                          |  29 +-
 .../__init__.py                               |   0
 src/{{ project_name_snake }}/clients.py       |  52 +++
 .../config.py.jinja                           |   0
 .../qa_workflows.py                           | 249 +++++-----
 .../__pycache__/__init__.cpython-312.pyc      | Bin 159 -> 0 bytes
 .../__pycache__/clients.cpython-312.pyc       | Bin 1440 -> 0 bytes
 .../__pycache__/qa_workflows.cpython-312.pyc  | Bin 19269 -> 0 bytes
 .../__pycache__/workflows.cpython-312.pyc     | Bin 11750 -> 0 bytes
 src/{{project_name_snake}}/clients.py         |  34 --
 test-proj/.copier-answers.yml                 |   6 +
 test-proj/.gitignore                          |   4 +
 test-proj/README.md                           |  17 +
 test-proj/pyproject.toml                      |  33 ++
 test-proj/src/test_proj/__init__.py           |   0
 test-proj/src/test_proj/clients.py            |  52 +++
 test-proj/src/test_proj/config.py             |   2 +
 test-proj/src/test_proj/qa_workflows.py       | 339 ++++++++++++++
 test-proj/tests/test_placeholder.py           |   2 +
 test-proj/ui/.gitignore                       |   4 +
 test-proj/ui/index.html                       |  14 +
 test-proj/ui/package.json                     |  35 ++
 test-proj/ui/postcss.config.mjs               |   5 +
 test-proj/ui/src/App.tsx                      |  14 +
 test-proj/ui/src/components/ChatBot.tsx       | 436 ++++++++++++++++++
 test-proj/ui/src/index.css                    | 120 +++++
 test-proj/ui/src/libs/clients.ts              |  32 ++
 test-proj/ui/src/libs/config.ts               |   2 +
 test-proj/ui/src/libs/utils.ts                |   3 +
 test-proj/ui/src/main.tsx                     |  13 +
 test-proj/ui/src/pages/Home.tsx               |  46 ++
 test-proj/ui/src/vite-env.d.ts                |  16 +
 test-proj/ui/tsconfig.json                    |  22 +
 test-proj/ui/vite.config.ts                   |  49 ++
 40 files changed, 1469 insertions(+), 229 deletions(-)
 create mode 100644 .gitignore.jinja
 delete mode 100644 llama_deploy.yaml
 delete mode 100644 llama_deploy.yaml.jinja
 delete mode 100644 pyproject.toml
 rename src/{{{project_name_snake}} => {{ project_name_snake }}}/__init__.py (100%)
 create mode 100644 src/{{ project_name_snake }}/clients.py
 rename src/{{{project_name_snake}} => {{ project_name_snake }}}/config.py.jinja (100%)
 rename src/{{{project_name_snake}} => {{ project_name_snake }}}/qa_workflows.py (59%)
 delete mode 100644 src/{{project_name_snake}}/__pycache__/__init__.cpython-312.pyc
 delete mode 100644 src/{{project_name_snake}}/__pycache__/clients.cpython-312.pyc
 delete mode 100644 src/{{project_name_snake}}/__pycache__/qa_workflows.cpython-312.pyc
 delete mode 100644 src/{{project_name_snake}}/__pycache__/workflows.cpython-312.pyc
 delete mode 100644 src/{{project_name_snake}}/clients.py
 create mode 100644 test-proj/.copier-answers.yml
 create mode 100644 test-proj/.gitignore
 create mode 100644 test-proj/README.md
 create mode 100644 test-proj/pyproject.toml
 create mode 100644 test-proj/src/test_proj/__init__.py
 create mode 100644 test-proj/src/test_proj/clients.py
 create mode 100644 test-proj/src/test_proj/config.py
 create mode 100644 test-proj/src/test_proj/qa_workflows.py
 create mode 100644 test-proj/tests/test_placeholder.py
 create mode 100644 test-proj/ui/.gitignore
 create mode 100644 test-proj/ui/index.html
 create mode 100644 test-proj/ui/package.json
 create mode 100644 test-proj/ui/postcss.config.mjs
 create mode 100644 test-proj/ui/src/App.tsx
 create mode 100644 test-proj/ui/src/components/ChatBot.tsx
 create mode 100644 test-proj/ui/src/index.css
 create mode 100644 test-proj/ui/src/libs/clients.ts
 create mode 100644 test-proj/ui/src/libs/config.ts
 create mode 100644 test-proj/ui/src/libs/utils.ts
 create mode 100644 test-proj/ui/src/main.tsx
 create mode 100644 test-proj/ui/src/pages/Home.tsx
 create mode 100644 test-proj/ui/src/vite-env.d.ts
 create mode 100644 test-proj/ui/tsconfig.json
 create mode 100644 test-proj/ui/vite.config.ts

diff --git a/.env b/.env
index e578db1..0da2864 100644
--- a/.env
+++ b/.env
@@ -1,5 +1,2 @@
-# LlamaCloud API configuration
-LLAMA_CLOUD_API_KEY=llx-your-api-key-here
-
 # OpenAI API configuration
 OPENAI_API_KEY=sk-your-openai-api-key-here
diff --git a/.gitignore.jinja b/.gitignore.jinja
new file mode 100644
index 0000000..06b8b0d
--- /dev/null
+++ b/.gitignore.jinja
@@ -0,0 +1,4 @@
+.env
+__pycache__
+workflows.db
+.venv
diff --git a/copier.yaml b/copier.yaml
index 8439d41..e3d6d67 100644
--- a/copier.yaml
+++ b/copier.yaml
@@ -8,29 +8,17 @@ project_name:
       Project name must contain only letters, numbers, and dashes
     {% endif %}
 
-llama_project_id:
-  type: str
-  help: What is your Llama Cloud project ID?
-  default: ""
-  required: true
-
-llama_org_id:
+project_title:
   type: str
-  help: What is your Llama Cloud organization ID?
-  default: ""
-  required: true
+  default: "{{ (project_name.replace('-', ' '))[:1] | upper ~ (project_name.replace('-', ' '))[1:] }}"
+  when: false
 
 # computed variables
 project_name_snake:
   type: str
   default: "{{ project_name.replace('-', '_') }}"
   when: false
-
-project_title:
-  type: str
-  default: "{{ (project_name.replace('-', ' '))[:1] | upper ~ (project_name.replace('-', ' '))[1:] }}"
-  when: false
-
+
 _exclude:
   - "test-proj"
   - ".git"
diff --git a/llama_deploy.yaml b/llama_deploy.yaml
deleted file mode 100644
index 1654876..0000000
--- a/llama_deploy.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-env_files:
-  - ".env"
-llama_cloud: true
-workflows:
-  upload: "document-qa.qa_workflows:DocumentUploadWorkflow"
-  chat: "document-qa.qa_workflows:ChatWorkflow"
-ui:
-  directory: ui
diff --git a/llama_deploy.yaml.jinja b/llama_deploy.yaml.jinja
deleted file mode 100644
index be93469..0000000
--- a/llama_deploy.yaml.jinja
+++ /dev/null
@@ -1,8 +0,0 @@
-env_files:
-  - ".env"
-llama_cloud: true
-workflows:
-  upload: "{{project_name_snake}}.qa_workflows:DocumentUploadWorkflow"
-  chat: "{{project_name_snake}}.qa_workflows:ChatWorkflow"
-ui:
-  directory: ui
diff --git a/pyproject.toml b/pyproject.toml
deleted file mode 100644
index 5fbb656..0000000
--- a/pyproject.toml
+++ /dev/null
@@ -1,25 +0,0 @@
-[project]
-name = "document-qa"
-version = "0.1.0"
-description = "Add your description here"
-readme = "README.md"
-authors = [
-    { name = "Terry Zhao", email = "terry@runllama.ai" }
-]
-requires-python = ">=3.12"
-dependencies = [
-    "llama-index-workflows>=2.2.0",
-    "python-cowsay>=1.2.1",
-    "llama-cloud-services>=0.6.0",
-    "llama-index-core>=0.12.0",
-    "llama-index-llms-openai>=0.3.0",
-    "llama-index-embeddings-openai>=0.3.0",
-    "python-dotenv>=1.0.1",
-]
-
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[dependency-groups]
-dev = []
diff --git a/pyproject.toml.jinja b/pyproject.toml.jinja
index e4a2fc3..762dd75 100644
--- a/pyproject.toml.jinja
+++ b/pyproject.toml.jinja
@@ -1,20 +1,17 @@
 [project]
-name = "{{project_name_snake}}"
+name = "{{ project_name_snake }}"
 version = "0.1.0"
 description = "Add your description here"
 readme = "README.md"
-authors = [
-    { name = "Terry Zhao", email = "terry@runllama.ai" }
-]
+authors = []
 requires-python = ">=3.12"
 dependencies = [
-    "llama-index-workflows>=2.2.0",
-    "python-cowsay>=1.2.1",
-    "llama-cloud-services>=0.6.0",
-    "llama-index-core>=0.12.0",
-    "llama-index-llms-openai>=0.3.0",
"llama-index-embeddings-openai>=0.3.0", - "python-dotenv>=1.0.1", + "llama-index-workflows>=2.2.0,<3.0.0", + "llama-cloud-services>=0.6.68", + "llama-index-core>=0.14.0", + "llama-index-llms-openai>=0.5.6", + "llama-index-embeddings-openai>=0.5.1", + "python-dotenv>=1.1.1", ] [build-system] @@ -24,3 +21,13 @@ build-backend = "hatchling.build" [dependency-groups] dev = [] +[tool.llamadeploy] +env-files = [".env"] +llama_cloud = true + +[tool.llamadeploy.ui] +directory = "./ui" + +[tool.llamadeploy.workflows] +upload = "test_proj.qa_workflows:upload" +chat = "test_proj.qa_workflows:chat" diff --git a/src/{{project_name_snake}}/__init__.py b/src/{{ project_name_snake }}/__init__.py similarity index 100% rename from src/{{project_name_snake}}/__init__.py rename to src/{{ project_name_snake }}/__init__.py diff --git a/src/{{ project_name_snake }}/clients.py b/src/{{ project_name_snake }}/clients.py new file mode 100644 index 0000000..9f8d0d8 --- /dev/null +++ b/src/{{ project_name_snake }}/clients.py @@ -0,0 +1,52 @@ +import functools +import os +import httpx + +from llama_cloud.client import AsyncLlamaCloud +from llama_cloud_services import LlamaParse + +# deployed agents may infer their name from the deployment name +# Note: Make sure that an agent deployment with this name actually exists +# otherwise calls to get or set data will fail. You may need to adjust the `or ` +# name for development +DEPLOYMENT_NAME = os.getenv("LLAMA_DEPLOY_DEPLOYMENT_NAME") +# required for all llama cloud calls +LLAMA_CLOUD_API_KEY = os.environ["LLAMA_CLOUD_API_KEY"] +# get this in case running against a different environment than production +LLAMA_CLOUD_BASE_URL = os.getenv("LLAMA_CLOUD_BASE_URL") +LLAMA_CLOUD_PROJECT_ID = os.getenv("LLAMA_DEPLOY_PROJECT_ID") +INDEX_NAME = "document_qa_index" + + +def get_custom_client() -> httpx.AsyncClient: + return httpx.AsyncClient( + timeout=60, + headers={"Project-Id": LLAMA_CLOUD_PROJECT_ID} + if LLAMA_CLOUD_PROJECT_ID + else None, + ) + + +@functools.cache +def get_llama_cloud_client() -> AsyncLlamaCloud: + return AsyncLlamaCloud( + base_url=LLAMA_CLOUD_BASE_URL, + token=LLAMA_CLOUD_API_KEY, + httpx_client=get_custom_client(), + ) + + +@functools.cache +def get_llama_parse_client() -> LlamaParse: + return LlamaParse( + parse_mode="parse_page_with_agent", + model="openai-gpt-4-1-mini", + high_res_ocr=True, + adaptive_long_table=True, + outlined_table_extraction=True, + output_tables_as_HTML=True, + result_type="markdown", + api_key=LLAMA_CLOUD_API_KEY, + project_id=LLAMA_CLOUD_PROJECT_ID, + custom_client=get_custom_client(), + ) diff --git a/src/{{project_name_snake}}/config.py.jinja b/src/{{ project_name_snake }}/config.py.jinja similarity index 100% rename from src/{{project_name_snake}}/config.py.jinja rename to src/{{ project_name_snake }}/config.py.jinja diff --git a/src/{{project_name_snake}}/qa_workflows.py b/src/{{ project_name_snake }}/qa_workflows.py similarity index 59% rename from src/{{project_name_snake}}/qa_workflows.py rename to src/{{ project_name_snake }}/qa_workflows.py index 96feecb..cde304a 100644 --- a/src/{{project_name_snake}}/qa_workflows.py +++ b/src/{{ project_name_snake }}/qa_workflows.py @@ -7,21 +7,31 @@ import tempfile from llama_index.core.chat_engine.types import BaseChatEngine, ChatMode from workflows import Workflow, step, Context -from workflows.events import StartEvent, StopEvent, Event, InputRequiredEvent, HumanResponseEvent +from workflows.events import ( + StartEvent, + StopEvent, + Event, + InputRequiredEvent, + HumanResponseEvent, 
+)
 from workflows.retry_policy import ConstantDelayRetryPolicy
-from workflows.server import WorkflowServer
-from llama_cloud_services import LlamaParse, LlamaCloudIndex
+from llama_cloud_services import LlamaCloudIndex
 from llama_index.core import Settings
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.core.memory import ChatMemoryBuffer
-from dotenv import load_dotenv
-from .clients import get_custom_client, get_llama_cloud_client
-from .config import PROJECT_ID, ORGANIZATION_ID
-# Load environment variables
-load_dotenv()
+from .clients import (
+    INDEX_NAME,
+    LLAMA_CLOUD_API_KEY,
+    LLAMA_CLOUD_BASE_URL,
+    get_custom_client,
+    get_llama_cloud_client,
+    get_llama_parse_client,
+    LLAMA_CLOUD_PROJECT_ID,
+)
+
 
 logger = logging.getLogger(__name__)
@@ -30,57 +40,45 @@ class FileEvent(StartEvent):
     file_id: str
     index_name: str
 
+
 class DownloadFileEvent(Event):
     file_id: str
 
+
 class FileDownloadedEvent(Event):
     file_id: str
     file_path: str
     filename: str
 
+
 class ChatEvent(StartEvent):
     index_name: str
     session_id: str
 
+
 # Configure LLM and embedding model
 Settings.llm = OpenAI(model="gpt-4", temperature=0.1)
 Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
 
 custom_client = get_custom_client()
 
+
 class DocumentUploadWorkflow(Workflow):
     """Workflow to upload and index documents using LlamaParse and LlamaCloud Index"""
-    
+
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         # Get API key with validation
-        api_key = os.getenv("LLAMA_CLOUD_API_KEY")
-        if not api_key:
-            logger.warning("Warning: LLAMA_CLOUD_API_KEY not found in environment. Document upload will not work.")
-            self.parser = None
-        else:
-            # Initialize LlamaParse with recommended settings
-            logger.info(f"Initializing LlamaParse with API key: {api_key}")
-            self.parser = LlamaParse(
-                parse_mode="parse_page_with_agent",
-                model="openai-gpt-4-1-mini",
-                high_res_ocr=True,
-                adaptive_long_table=True,
-                outlined_table_extraction=True,
-                output_tables_as_HTML=True,
-                result_type="markdown",
-                api_key=api_key,
-                project_id=PROJECT_ID,
-                organization_id=ORGANIZATION_ID,
-                custom_client=custom_client
-            )
+
+        # Initialize LlamaParse with recommended settings
+        self.parser = get_llama_parse_client()
 
     @step(retry_policy=ConstantDelayRetryPolicy(maximum_attempts=3, delay=10))
     async def run_file(self, event: FileEvent, ctx: Context) -> DownloadFileEvent:
         logger.info(f"Running file {event.file_id}")
         await ctx.store.set("index_name", event.index_name)
         return DownloadFileEvent(file_id=event.file_id)
-    
+
     @step(retry_policy=ConstantDelayRetryPolicy(maximum_attempts=3, delay=10))
     async def download_file(
         self, event: DownloadFileEvent, ctx: Context
@@ -114,78 +112,61 @@ async def download_file(
             logger.error(f"Error downloading file {event.file_id}: {e}", exc_info=True)
             raise e
 
-
     @step
     async def parse_document(self, ev: FileDownloadedEvent, ctx: Context) -> StopEvent:
         """Parse document and index it to LlamaCloud"""
         try:
             logger.info(f"Parsing document {ev.file_id}")
-            # Check if parser is initialized
-            if not self.parser:
-                return StopEvent(result={
-                    "success": False,
-                    "error": "LLAMA_CLOUD_API_KEY not configured. Please set it in your .env file."
-                })
-
             # Get file path or content from event
             file_path = ev.file_path
             file_name = file_path.split("/")[-1]
             index_name = await ctx.store.get("index_name")
-            
+
             # Parse the document
             if file_path:
                 # Parse from file path
                 result = await self.parser.aparse(file_path)
-                
+
                 # Get parsed documents
                 documents = result.get_text_documents()
-                
+
                 # Create or connect to LlamaCloud Index
-                try:
-                    logger.info(f"Connecting to existing index {index_name}")
-                    # Try to connect to existing index
-                    index = LlamaCloudIndex(
-                        name=index_name,
-                        project_id=PROJECT_ID,
-                        organization_id=ORGANIZATION_ID,
-                        api_key=os.getenv("LLAMA_CLOUD_API_KEY"),
-                        custom_client=custom_client
-                    )
-                    for document in documents:
-                        index.insert(document)
-                except Exception:
-                    # Create new index if doesn't exist
-                    logger.info(f"Creating new index {index_name}")
-                    index = LlamaCloudIndex.from_documents(
-                        documents=documents,
-                        name=index_name,
-                        project_id=PROJECT_ID,
-                        organization_id=ORGANIZATION_ID,
-                        api_key=os.getenv("LLAMA_CLOUD_API_KEY"),
-                        show_progress=True,
-                        custom_client=custom_client
-                    )
-
-                return StopEvent(result={
-                    "success": True,
-                    "index_name": index_name,
-                    "index_url": f"https://cloud.llamaindex.ai/projects/{PROJECT_ID}/indexes/{index.id}",
-                    "document_count": len(documents),
-                    "file_name": file_name,
-                    "message": f"Successfully indexed {len(documents)} documents to '{index_name}'"
-                })
-
+                index = LlamaCloudIndex.create_index(
+                    documents=documents,
+                    name=index_name,
+                    project_id=LLAMA_CLOUD_PROJECT_ID,
+                    api_key=LLAMA_CLOUD_API_KEY,
+                    base_url=LLAMA_CLOUD_BASE_URL,
+                    show_progress=True,
+                    custom_client=custom_client,
+                )
+
+                # Insert documents to index
+                logger.info(f"Inserting {len(documents)} documents to {index_name}")
+                for document in documents:
+                    index.insert(document)
+
+                return StopEvent(
+                    result={
+                        "success": True,
+                        "index_name": index_name,
+                        "document_count": len(documents),
+                        "index_url": f"https://cloud.llamaindex.ai/projects/{LLAMA_CLOUD_PROJECT_ID}/indexes/{index.id}",
+                        "file_name": file_name,
+                        "message": f"Successfully indexed {len(documents)} documents to '{index_name}'",
+                    }
+                )
+
         except Exception as e:
             logger.error(e.stack_trace)
-            return StopEvent(result={
-                "success": False,
-                "error": str(e),
-                "stack_trace": e.stack_trace
-            })
+            return StopEvent(
+                result={"success": False, "error": str(e), "stack_trace": e.stack_trace}
+            )
 
 
 class ChatResponseEvent(Event):
     """Event emitted when chat engine generates a response"""
+
     response: str
     sources: list
     query: str
@@ -193,6 +174,7 @@ class ChatResponseEvent(Event):
 
 class ChatDeltaEvent(Event):
     """Streaming delta for incremental response output"""
+
     delta: str
 
 
@@ -201,7 +183,9 @@ class ChatWorkflow(Workflow):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.chat_engines: dict[str, BaseChatEngine] = {}  # Cache chat engines per index
+        self.chat_engines: dict[
+            str, BaseChatEngine
+        ] = {}  # Cache chat engines per index
 
     @step
     async def initialize_chat(self, ev: ChatEvent, ctx: Context) -> InputRequiredEvent:
@@ -225,10 +209,10 @@ async def initialize_chat(self, ev: ChatEvent, ctx: Context) -> InputRequiredEve
             # Connect to LlamaCloud Index
             index = LlamaCloudIndex(
                 name=index_name,
-                project_id=PROJECT_ID,
-                organization_id=ORGANIZATION_ID,
-                api_key=os.getenv("LLAMA_CLOUD_API_KEY"),
-                custom_client=custom_client
+                project_id=LLAMA_CLOUD_PROJECT_ID,
+                api_key=LLAMA_CLOUD_API_KEY,
+                base_url=LLAMA_CLOUD_BASE_URL,
+                async_httpx_client=custom_client,
             )
 
             # Create chat engine with memory
@@ -252,13 +236,17 @@ async def initialize_chat(self, ev: ChatEvent, ctx: Context) -> InputRequiredEve
             )
 
         except Exception as e:
-            return StopEvent(result={
-                "success": False,
-                "error": f"Failed to initialize chat: {str(e)}"
-            })
+            return StopEvent(
+                result={
+                    "success": False,
+                    "error": f"Failed to initialize chat: {str(e)}",
+                }
+            )
 
     @step
-    async def process_user_response(self, ev: HumanResponseEvent, ctx: Context) -> InputRequiredEvent | HumanResponseEvent | StopEvent | None:
+    async def process_user_response(
+        self, ev: HumanResponseEvent, ctx: Context
+    ) -> InputRequiredEvent | HumanResponseEvent | StopEvent | None:
         """Process user input and generate response"""
         try:
             logger.info(f"Processing user response {ev.response}")
@@ -268,13 +256,17 @@ async def process_user_response(self, ev: HumanResponseEvent, ctx: Context) -> I
 
             # Check for exit command
             if user_input.lower() == "exit":
-                logger.info(f"User input is exit")
-                conversation_history = await ctx.store.get("conversation_history", default=[])
-                return StopEvent(result={
-                    "success": True,
-                    "message": "Chat session ended.",
-                    "conversation_history": conversation_history
-                })
+                logger.info("User input is exit")
+                conversation_history = await ctx.store.get(
+                    "conversation_history", default=[]
+                )
+                return StopEvent(
+                    result={
+                        "success": True,
+                        "message": "Chat session ended.",
+                        "conversation_history": conversation_history,
+                    }
+                )
 
             # Get session info from context
             index_name = await ctx.store.get("index_name")
@@ -295,29 +287,43 @@ async def process_user_response(self, ev: HumanResponseEvent, ctx: Context) -> I
 
             # Extract source nodes for citations
             sources = []
-            if hasattr(stream_response, 'source_nodes'):
+            if hasattr(stream_response, "source_nodes"):
                 for node in stream_response.source_nodes:
-                    sources.append({
-                        "text": node.text[:200] + "..." if len(node.text) > 200 else node.text,
-                        "score": node.score if hasattr(node, 'score') else None,
-                        "metadata": node.metadata if hasattr(node, 'metadata') else {}
-                    })
+                    sources.append(
+                        {
+                            "text": node.text[:200] + "..."
+                            if len(node.text) > 200
+                            else node.text,
+                            "score": node.score if hasattr(node, "score") else None,
+                            "metadata": node.metadata
+                            if hasattr(node, "metadata")
+                            else {},
+                        }
+                    )
 
             # Update conversation history
-            conversation_history = await ctx.store.get("conversation_history", default=[])
-            conversation_history.append({
-                "query": user_input,
-                "response": full_text.strip() if full_text else str(stream_response),
-                "sources": sources
-            })
+            conversation_history = await ctx.store.get(
+                "conversation_history", default=[]
+            )
+            conversation_history.append(
+                {
+                    "query": user_input,
+                    "response": full_text.strip()
+                    if full_text
+                    else str(stream_response),
+                    "sources": sources,
+                }
+            )
             await ctx.store.set("conversation_history", conversation_history)
 
             # After streaming completes, emit a summary response event to stream for frontend/main printing
-            ctx.write_event_to_stream(ChatResponseEvent(
-                response=full_text.strip() if full_text else str(stream_response),
-                sources=sources,
-                query=user_input,
-            ))
+            ctx.write_event_to_stream(
+                ChatResponseEvent(
+                    response=full_text.strip() if full_text else str(stream_response),
+                    sources=sources,
+                    query=user_input,
+                )
+            )
 
             # Prompt for next input
             return InputRequiredEvent(
@@ -325,14 +331,9 @@ async def process_user_response(self, ev: HumanResponseEvent, ctx: Context) -> I
             )
 
         except Exception as e:
-            return StopEvent(result={
-                "success": False,
-                "error": f"Error processing query: {str(e)}"
-            })
-
-
+            return StopEvent(
+                result={"success": False, "error": f"Error processing query: {str(e)}"}
+            )
 
-# Create workflow server
-app = WorkflowServer()
-app.add_workflow("upload", DocumentUploadWorkflow(timeout=300))
-app.add_workflow("chat", ChatWorkflow(timeout=None))
+upload = DocumentUploadWorkflow(timeout=None)
+chat = ChatWorkflow(timeout=None)
\ No newline at end of file
diff --git a/src/{{project_name_snake}}/__pycache__/__init__.cpython-312.pyc b/src/{{project_name_snake}}/__pycache__/__init__.cpython-312.pyc
deleted file mode 100644
index 5f8b8bb387170437150fb1b0f6ddb04d37014dd3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
diff --git a/src/{{project_name_snake}}/__pycache__/clients.cpython-312.pyc b/src/{{project_name_snake}}/__pycache__/clients.cpython-312.pyc
deleted file mode 100644
GIT binary patch
literal 0
HcmV?d00001
diff --git a/src/{{project_name_snake}}/__pycache__/qa_workflows.cpython-312.pyc b/src/{{project_name_snake}}/__pycache__/qa_workflows.cpython-312.pyc
deleted file mode 100644
index d8de40bdf6653e386c548d311bf7a76ce95f9d82..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
diff --git a/src/{{project_name_snake}}/__pycache__/workflows.cpython-312.pyc b/src/{{project_name_snake}}/__pycache__/workflows.cpython-312.pyc
deleted file mode 100644
GIT binary patch
literal 0
HcmV?d00001
diff --git a/src/{{project_name_snake}}/clients.py b/src/{{project_name_snake}}/clients.py
deleted file mode 100644
index ea324cd..0000000
--- a/src/{{project_name_snake}}/clients.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import functools
-import os
-import httpx
-
-import dotenv
-from llama_cloud.client import AsyncLlamaCloud
-
-dotenv.load_dotenv()
-
-# deployed agents may infer their name from the deployment name
-# Note: Make sure that an agent deployment with this name actually exists
-# otherwise calls to get or set data will fail. You may need to adjust the `or `
-# name for development
-agent_name = os.getenv("LLAMA_DEPLOY_DEPLOYMENT_NAME")
-agent_name_or_default = agent_name or "test-proj"
-# required for all llama cloud calls
-api_key = os.environ["LLAMA_CLOUD_API_KEY"]
-# get this in case running against a different environment than production
-base_url = os.getenv("LLAMA_CLOUD_BASE_URL")
-project_id = os.getenv("LLAMA_DEPLOY_PROJECT_ID")
-
-
-def get_custom_client():
-    return httpx.AsyncClient(
-        timeout=60, headers={"Project-Id": project_id} if project_id else None
-    )
-
-@functools.lru_cache(maxsize=None)
-def get_llama_cloud_client():
-    return AsyncLlamaCloud(
-        base_url=base_url,
-        token=api_key,
-        httpx_client=get_custom_client(),
-    )
diff --git a/test-proj/.copier-answers.yml b/test-proj/.copier-answers.yml
new file mode 100644
index 0000000..8463373
--- /dev/null
+++ b/test-proj/.copier-answers.yml
@@ -0,0 +1,6 @@
+# Changes here will be overwritten by Copier; NEVER EDIT MANUALLY
+_commit: '2405947'
+_src_path: .
+llama_org_id: asdf
+llama_project_id: asdf
+project_name: test-proj
diff --git a/test-proj/.gitignore b/test-proj/.gitignore
new file mode 100644
index 0000000..5da78ab
--- /dev/null
+++ b/test-proj/.gitignore
@@ -0,0 +1,4 @@
+.env
+__pycache__
+workflows.db
+.venv
\ No newline at end of file
diff --git a/test-proj/README.md b/test-proj/README.md
new file mode 100644
index 0000000..92d1b72
--- /dev/null
+++ b/test-proj/README.md
@@ -0,0 +1,17 @@
+# Document Q&A Application
+
+A document question-answering application built with LlamaIndex workflows and LlamaCloud services.
+
+
+This application uses LlamaDeploy. For more information, see [the docs](https://developers.llamaindex.ai/python/cloud/llamadeploy/getting-started).
+
+# Getting Started
+
+1. Install `uv` if you haven't already: `brew install uv`
+2. Run `uvx llamactl serve`
+3. Visit http://localhost:4501/docs and browse the workflow APIs (see the example below)
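+
+For example, the full local loop looks like this (a minimal sketch; it assumes the default `llamactl` port of 4501 used throughout this template):
+
+```bash
+# install uv if it is not already available (macOS example)
+brew install uv
+# start the LlamaDeploy dev server from the project root
+uvx llamactl serve
+# then browse the generated OpenAPI docs for the `upload` and `chat` workflows
+open http://localhost:4501/docs
+```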
+
+
+# Organization
+
+- `src` contains Python workflow sources. The name of the deployment here is defined as `document-qa`. See http://localhost:4501/docs for OpenAPI docs.
diff --git a/test-proj/pyproject.toml b/test-proj/pyproject.toml
new file mode 100644
index 0000000..59ce3a5
--- /dev/null
+++ b/test-proj/pyproject.toml
@@ -0,0 +1,33 @@
+[project]
+name = "test_proj"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+authors = []
+requires-python = ">=3.12"
+dependencies = [
+    "llama-index-workflows>=2.2.0,<3.0.0",
+    "llama-cloud-services>=0.6.68",
+    "llama-index-core>=0.14.0",
+    "llama-index-llms-openai>=0.5.6",
+    "llama-index-embeddings-openai>=0.5.1",
+    "python-dotenv>=1.1.1",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[dependency-groups]
+dev = []
+
+[tool.llamadeploy]
+env-files = [".env"]
+llama_cloud = true
+
+[tool.llamadeploy.ui]
+directory = "./ui"
+
+[tool.llamadeploy.workflows]
+upload = "test_proj.qa_workflows:upload"
+chat = "test_proj.qa_workflows:chat"
diff --git a/test-proj/src/test_proj/__init__.py b/test-proj/src/test_proj/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/test-proj/src/test_proj/clients.py b/test-proj/src/test_proj/clients.py
new file mode 100644
index 0000000..9f8d0d8
--- /dev/null
+++ b/test-proj/src/test_proj/clients.py
@@ -0,0 +1,52 @@
+import functools
+import os
+import httpx
+
+from llama_cloud.client import AsyncLlamaCloud
+from llama_cloud_services import LlamaParse
+
+# Deployed agents may infer their name from the deployment name.
+# Note: make sure that an agent deployment with this name actually exists,
+# otherwise calls to get or set data will fail. You may need to hard-code a
+# default name for local development.
+DEPLOYMENT_NAME = os.getenv("LLAMA_DEPLOY_DEPLOYMENT_NAME")
+# required for all llama cloud calls
+LLAMA_CLOUD_API_KEY = os.environ["LLAMA_CLOUD_API_KEY"]
+# set this in case you are running against a different environment than production
+LLAMA_CLOUD_BASE_URL = os.getenv("LLAMA_CLOUD_BASE_URL")
+LLAMA_CLOUD_PROJECT_ID = os.getenv("LLAMA_DEPLOY_PROJECT_ID")
+INDEX_NAME = "document_qa_index"
+
+
+def get_custom_client() -> httpx.AsyncClient:
+    return httpx.AsyncClient(
+        timeout=60,
+        headers={"Project-Id": LLAMA_CLOUD_PROJECT_ID}
+        if LLAMA_CLOUD_PROJECT_ID
+        else None,
+    )
+
+
+@functools.cache
+def get_llama_cloud_client() -> AsyncLlamaCloud:
+    return AsyncLlamaCloud(
+        base_url=LLAMA_CLOUD_BASE_URL,
+        token=LLAMA_CLOUD_API_KEY,
+        httpx_client=get_custom_client(),
+    )
+
+
+@functools.cache
+def get_llama_parse_client() -> LlamaParse:
+    return LlamaParse(
+        parse_mode="parse_page_with_agent",
+        model="openai-gpt-4-1-mini",
+        high_res_ocr=True,
+        adaptive_long_table=True,
+        outlined_table_extraction=True,
+        output_tables_as_HTML=True,
+        result_type="markdown",
+        api_key=LLAMA_CLOUD_API_KEY,
+        project_id=LLAMA_CLOUD_PROJECT_ID,
+        custom_client=get_custom_client(),
+    )
diff --git a/test-proj/src/test_proj/config.py b/test-proj/src/test_proj/config.py
new file mode 100644
index 0000000..0df6766
--- /dev/null
+++ b/test-proj/src/test_proj/config.py
@@ -0,0 +1,2 @@
+PROJECT_ID = "asdf"
+ORGANIZATION_ID = "asdf"
\ No newline at end of file
diff --git a/test-proj/src/test_proj/qa_workflows.py b/test-proj/src/test_proj/qa_workflows.py
new file mode 100644
index 0000000..cde304a
--- /dev/null
+++ b/test-proj/src/test_proj/qa_workflows.py
@@ -0,0 +1,339 @@
+import logging
+import os
+import uuid
+
+import httpx
+from llama_cloud.types import RetrievalMode
+import tempfile
+from llama_index.core.chat_engine.types import BaseChatEngine, ChatMode
+from workflows import Workflow, step, Context
+from workflows.events import (
+    StartEvent,
+    StopEvent,
+    Event,
+    InputRequiredEvent,
+    HumanResponseEvent,
+)
+from workflows.retry_policy import ConstantDelayRetryPolicy
+
+from llama_cloud_services import LlamaCloudIndex
+from llama_index.core import Settings
+from llama_index.llms.openai import OpenAI
+from llama_index.embeddings.openai import OpenAIEmbedding
+from llama_index.core.memory import ChatMemoryBuffer
+
+from .clients import (
+    INDEX_NAME,
+    LLAMA_CLOUD_API_KEY,
+    LLAMA_CLOUD_BASE_URL,
+    get_custom_client,
+    get_llama_cloud_client,
+    get_llama_parse_client,
+    LLAMA_CLOUD_PROJECT_ID,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class FileEvent(StartEvent):
+    file_id: str
+    index_name: str
+
+
+class DownloadFileEvent(Event):
+    file_id: str
+
+
+class FileDownloadedEvent(Event):
+    file_id: str
+    file_path: str
+    filename: str
+
+
+class ChatEvent(StartEvent):
+    index_name: str
+    session_id: str
+
+
+# Configure LLM and embedding model
+Settings.llm = OpenAI(model="gpt-4", temperature=0.1)
+Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
+
+custom_client = get_custom_client()
+
+
+class DocumentUploadWorkflow(Workflow):
+    """Workflow to upload and index documents using LlamaParse and LlamaCloud Index"""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        # Get API key with validation
+
+        # Initialize LlamaParse with recommended settings
+        self.parser = get_llama_parse_client()
+
+    @step(retry_policy=ConstantDelayRetryPolicy(maximum_attempts=3, delay=10))
+    async def run_file(self, event: FileEvent, ctx: Context) -> DownloadFileEvent:
+        logger.info(f"Running file {event.file_id}")
+        await ctx.store.set("index_name", event.index_name)
+        return DownloadFileEvent(file_id=event.file_id)
+
+    @step(retry_policy=ConstantDelayRetryPolicy(maximum_attempts=3, delay=10))
+    async def download_file(
+        self, event: DownloadFileEvent, ctx: Context
+    ) -> FileDownloadedEvent:
+        """Download the file reference from the cloud storage"""
+        logger.info(f"Downloading file {event.file_id}")
+        try:
+            file_metadata = await get_llama_cloud_client().files.get_file(
+                id=event.file_id
+            )
+            file_url = await get_llama_cloud_client().files.read_file_content(
+                event.file_id
+            )
+
+            temp_dir = tempfile.gettempdir()
+            filename = file_metadata.name
+            file_path = os.path.join(temp_dir, filename)
+            client = httpx.AsyncClient()
+            # Report progress to the UI
+            logger.info(f"Downloading file {file_url.url} to {file_path}")
+
+            async with client.stream("GET", file_url.url) as response:
+                with open(file_path, "wb") as f:
+                    async for chunk in response.aiter_bytes():
+                        f.write(chunk)
+            logger.info(f"Downloaded file {file_url.url} to {file_path}")
+            return FileDownloadedEvent(
+                file_id=event.file_id, file_path=file_path, filename=filename
+            )
+        except Exception as e:
+            logger.error(f"Error downloading file {event.file_id}: {e}", exc_info=True)
+            raise e
+
+    @step
+    async def parse_document(self, ev: FileDownloadedEvent, ctx: Context) -> StopEvent:
+        """Parse document and index it to LlamaCloud"""
+        try:
+            logger.info(f"Parsing document {ev.file_id}")
+            # Get file path or content from event
+            file_path = ev.file_path
+            file_name = file_path.split("/")[-1]
+            index_name = await ctx.store.get("index_name")
+
+            # Parse the document
+            if file_path:
+                # Parse from file path
+                result = await self.parser.aparse(file_path)
+
+                # Get parsed documents
+                documents = result.get_text_documents()
+
+                # Create or connect to LlamaCloud Index
+                index = LlamaCloudIndex.create_index(
+                    documents=documents,
+                    name=index_name,
+                    project_id=LLAMA_CLOUD_PROJECT_ID,
+                    api_key=LLAMA_CLOUD_API_KEY,
+                    base_url=LLAMA_CLOUD_BASE_URL,
+                    show_progress=True,
+                    custom_client=custom_client,
+                )
+
+                # Insert documents to index
+                logger.info(f"Inserting {len(documents)} documents to {index_name}")
+                for document in documents:
+                    index.insert(document)
+
+                return StopEvent(
+                    result={
+                        "success": True,
+                        "index_name": index_name,
+                        "document_count": len(documents),
+                        "index_url": f"https://cloud.llamaindex.ai/projects/{LLAMA_CLOUD_PROJECT_ID}/indexes/{index.id}",
+                        "file_name": file_name,
+                        "message": f"Successfully indexed {len(documents)} documents to '{index_name}'",
+                    }
+                )
+
+        except Exception as e:
+            logger.error(e.stack_trace)
+            return StopEvent(
+                result={"success": False, "error": str(e), "stack_trace": e.stack_trace}
+            )
+
+
+class ChatResponseEvent(Event):
+    """Event emitted when chat engine generates a response"""
+
+    response: str
+    sources: list
+    query: str
+
+
+class ChatDeltaEvent(Event):
+    """Streaming delta for incremental response output"""
+
+    delta: str
+
+
+class ChatWorkflow(Workflow):
+    """Workflow to handle continuous chat queries against indexed documents"""
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.chat_engines: dict[
+            str, BaseChatEngine
+        ] = {}  # Cache chat engines per index
+
+    @step
+    async def initialize_chat(self, ev: ChatEvent, ctx: Context) -> InputRequiredEvent:
+        """Initialize the chat session and request first input"""
+        try:
+            logger.info(f"Initializing chat {ev.index_name}")
+            index_name = ev.index_name
+            session_id = ev.session_id
+
+            # Store session info in context
+            await ctx.store.set("index_name", index_name)
+            await ctx.store.set("session_id", session_id)
+            await ctx.store.set("conversation_history", [])
+
+            # Create cache key for chat engine
+            cache_key = f"{index_name}_{session_id}"
+
+            # Initialize chat engine if not exists
+            if cache_key not in self.chat_engines:
+                logger.info(f"Initializing chat engine {cache_key}")
+                # Connect to LlamaCloud Index
+                index = LlamaCloudIndex(
+                    name=index_name,
+                    project_id=LLAMA_CLOUD_PROJECT_ID,
+                    api_key=LLAMA_CLOUD_API_KEY,
+                    base_url=LLAMA_CLOUD_BASE_URL,
+                    async_httpx_client=custom_client,
+                )
+
+                # Create chat engine with memory
+                memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
+                self.chat_engines[cache_key] = index.as_chat_engine(
+                    chat_mode=ChatMode.CONTEXT,
+                    memory=memory,
+                    llm=Settings.llm,
+                    context_prompt=(
+                        "You are a helpful assistant that answers questions based on the provided documents. "
+                        "Always cite specific information from the documents when answering. "
+                        "If you cannot find the answer in the documents, say so clearly."
+                    ),
+                    verbose=False,
+                    retriever_mode=RetrievalMode.CHUNKS,
+                )
+
+            # Request first user input
+            return InputRequiredEvent(
+                prefix="Chat initialized. Ask a question (or type 'exit' to quit): "
+            )
+
+        except Exception as e:
+            return StopEvent(
+                result={
+                    "success": False,
+                    "error": f"Failed to initialize chat: {str(e)}",
+                }
+            )
+
+    @step
+    async def process_user_response(
+        self, ev: HumanResponseEvent, ctx: Context
+    ) -> InputRequiredEvent | HumanResponseEvent | StopEvent | None:
+        """Process user input and generate response"""
+        try:
+            logger.info(f"Processing user response {ev.response}")
+            user_input = ev.response.strip()
+
+            logger.info(f"User input: {user_input}")
+
+            # Check for exit command
+            if user_input.lower() == "exit":
+                logger.info("User input is exit")
+                conversation_history = await ctx.store.get(
+                    "conversation_history", default=[]
+                )
+                return StopEvent(
+                    result={
+                        "success": True,
+                        "message": "Chat session ended.",
+                        "conversation_history": conversation_history,
+                    }
+                )
+
+            # Get session info from context
+            index_name = await ctx.store.get("index_name")
+            session_id = await ctx.store.get("session_id")
+            cache_key = f"{index_name}_{session_id}"
+
+            # Get chat engine
+            chat_engine = self.chat_engines[cache_key]
+
+            # Process query with chat engine (streaming)
+            stream_response = await chat_engine.astream_chat(user_input)
+            full_text = ""
+
+            # Emit streaming deltas to the event stream
+            async for token in stream_response.async_response_gen():
+                full_text += token
+                ctx.write_event_to_stream(ChatDeltaEvent(delta=token))
+
+            # Extract source nodes for citations
+            sources = []
+            if hasattr(stream_response, "source_nodes"):
+                for node in stream_response.source_nodes:
+                    sources.append(
+                        {
+                            "text": node.text[:200] + "..."
+                            if len(node.text) > 200
+                            else node.text,
+                            "score": node.score if hasattr(node, "score") else None,
+                            "metadata": node.metadata
+                            if hasattr(node, "metadata")
+                            else {},
+                        }
+                    )
+
+            # Update conversation history
+            conversation_history = await ctx.store.get(
+                "conversation_history", default=[]
+            )
+            conversation_history.append(
+                {
+                    "query": user_input,
+                    "response": full_text.strip()
+                    if full_text
+                    else str(stream_response),
+                    "sources": sources,
+                }
+            )
+            await ctx.store.set("conversation_history", conversation_history)
+
+            # After streaming completes, emit a summary response event to stream for frontend/main printing
+            ctx.write_event_to_stream(
+                ChatResponseEvent(
+                    response=full_text.strip() if full_text else str(stream_response),
+                    sources=sources,
+                    query=user_input,
+                )
+            )
+
+            # Prompt for next input
+            return InputRequiredEvent(
+                prefix="\nAsk another question (or type 'exit' to quit): "
+            )
+
+        except Exception as e:
+            return StopEvent(
+                result={"success": False, "error": f"Error processing query: {str(e)}"}
+            )
+
+upload = DocumentUploadWorkflow(timeout=None)
+chat = ChatWorkflow(timeout=None)
\ No newline at end of file
diff --git a/test-proj/tests/test_placeholder.py b/test-proj/tests/test_placeholder.py
new file mode 100644
index 0000000..201975f
--- /dev/null
+++ b/test-proj/tests/test_placeholder.py
@@ -0,0 +1,2 @@
+def test_placeholder():
+    pass
diff --git a/test-proj/ui/.gitignore b/test-proj/ui/.gitignore
new file mode 100644
index 0000000..31ee023
--- /dev/null
+++ b/test-proj/ui/.gitignore
@@ -0,0 +1,4 @@
+node_modules
+dist
+# uses pnpm
+pnpm-lock.yaml
diff --git a/test-proj/ui/index.html b/test-proj/ui/index.html
new file mode 100644
index 0000000..37e7c42
--- /dev/null
+++ b/test-proj/ui/index.html
@@ -0,0 +1,14 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Quick Start UI</title>
+  </head>
+
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
+
diff --git a/test-proj/ui/package.json b/test-proj/ui/package.json
new file mode 100644
index 0000000..761feb4
--- /dev/null
+++ b/test-proj/ui/package.json
@@ -0,0 +1,35 @@
+{
+  "name": "test-proj-ui",
+  "version": "0.1.0",
+  "private": true,
+  "type": "module",
+  "scripts": {
+    "dev": "vite",
+    "build": "tsc -b && vite build",
+    "preview": "vite preview"
+  },
+  "dependencies": {
+    "@llamaindex/ui": "^1.0.2",
+    "@llamaindex/workflows-client": "^1.2.0",
+    "llama-cloud-services": "^0.3.6",
+    "@radix-ui/themes": "^3.2.1",
+    "lucide-react": "^0.544.0",
+    "react": "^19.0.0",
+    "react-dom": "^19.0.0",
+    "tw-animate-css": "^1.3.8"
+  },
+  "devDependencies": {
+    "@tailwindcss/postcss": "^4",
+    "@tailwindcss/vite": "^4.1.13",
+    "@types/node": "^20",
+    "@types/react": "^19",
+    "@types/react-dom": "^19",
+    "@vitejs/plugin-react": "^4.3.4",
+    "dotenv": "^17.2.2",
+    "eslint": "^9",
+    "tailwindcss": "^4",
+    "typescript": "^5",
+    "vite": "^5.4.8"
+  },
+  "packageManager": "pnpm@10.11.0+sha512.6540583f41cc5f628eb3d9773ecee802f4f9ef9923cc45b69890fb47991d4b092964694ec3a4f738a420c918a333062c8b925d312f42e4f0c263eb603551f977"
+}
diff --git a/test-proj/ui/postcss.config.mjs b/test-proj/ui/postcss.config.mjs
new file mode 100644
index 0000000..f3cc6d3
--- /dev/null
+++ b/test-proj/ui/postcss.config.mjs
@@ -0,0 +1,5 @@
+import tailwind from "@tailwindcss/postcss";
+
+export default {
+  plugins: [tailwind()],
+};
diff --git a/test-proj/ui/src/App.tsx b/test-proj/ui/src/App.tsx
new file mode 100644
index 0000000..a52a680
--- /dev/null
+++ b/test-proj/ui/src/App.tsx
@@ -0,0 +1,14 @@
+import { ApiProvider } from "@llamaindex/ui";
+import Home from "./pages/Home";
+import { Theme } from "@radix-ui/themes";
+import { clients } from "@/libs/clients";
+
+export default function App() {
+  return (
+    <ApiProvider clients={clients}>
+      <Theme>
+        <Home />
+      </Theme>
+    </ApiProvider>
+  )
+}
\ No newline at end of file
diff --git a/test-proj/ui/src/components/ChatBot.tsx b/test-proj/ui/src/components/ChatBot.tsx
new file mode 100644
index 0000000..f3af441
--- /dev/null
+++ b/test-proj/ui/src/components/ChatBot.tsx
@@ -0,0 +1,436 @@
+// This is a temporary chatbot component that is used to test the chatbot functionality.
+// LlamaIndex will replace it with a better chatbot component.
+import { useState, useRef, useEffect, FormEvent, KeyboardEvent } from "react";
+import { Send, Loader2, Bot, User, MessageSquare, Trash2, RefreshCw } from "lucide-react";
+import { Button, Input, ScrollArea, Card, CardContent, cn, useWorkflowTaskCreate, useWorkflowTask } from "@llamaindex/ui";
+import { AGENT_NAME } from "../libs/config";
+import { toHumanResponseRawEvent } from "@/libs/utils";
+
+type Role = "user" | "assistant";
+interface Message {
+  id: string;
+  role: Role;
+  content: string;
+  timestamp: Date;
+  error?: boolean;
+}
+export default function ChatBot() {
+  const { createTask } = useWorkflowTaskCreate();
+  const messagesEndRef = useRef<HTMLDivElement | null>(null);
+  const inputRef = useRef<HTMLInputElement | null>(null);
+  const [messages, setMessages] = useState<Message[]>([]);
+  const [input, setInput] = useState("");
+  const [isLoading, setIsLoading] = useState(false);
+  const [handlerId, setHandlerId] = useState<string | null>(null);
+  const lastProcessedEventIndexRef = useRef(0);
+  const [canSend, setCanSend] = useState(false);
+  const streamingMessageIndexRef = useRef<number | null>(null);
+
+  // Deployment + auth setup
+  const deployment = AGENT_NAME || "document-qa";
+  const platformToken = (import.meta as any).env?.VITE_LLAMA_CLOUD_API_KEY as string | undefined;
+  const projectId = (import.meta as any).env?.VITE_LLAMA_DEPLOY_PROJECT_ID as string | undefined;
+  const defaultIndexName = (import.meta as any).env?.VITE_DEFAULT_INDEX_NAME || "document_qa_index";
+  const sessionIdRef = useRef(`chat-${Math.random().toString(36).slice(2)}-${Date.now()}`);
+
+  // UI text defaults
+  const title = "AI Document Assistant";
+  const placeholder = "Ask me anything about your documents...";
+  const welcomeMessage = "Welcome! 👋 Upload a document with the control above, then ask questions here.";
+
+  // Helper functions for message management
+  const appendMessage = (role: Role, msg: string): void => {
+    setMessages(prev => {
+      const id = `${role}-stream-${Date.now()}`;
+      const idx = prev.length;
+      streamingMessageIndexRef.current = idx;
+      return [
+        ...prev,
+        {
+          id,
+          role,
+          content: msg,
+          timestamp: new Date(),
+        },
+      ];
+    });
+  };
+
+  const updateMessage = (index: number, message: string) => {
+    setMessages(prev => {
+      if (index < 0 || index >= prev.length) return prev;
+      const copy = [...prev];
+      const existing = copy[index];
+      copy[index] = { ...existing, content: message };
+      return copy;
+    });
+  };
+
+  // Initialize with welcome message
+  useEffect(() => {
+    if (messages.length === 0) {
+      const welcomeMsg: Message = {
+        id: "welcome",
+        role: "assistant",
+        content: welcomeMessage,
+        timestamp: new Date()
+      };
+      setMessages([welcomeMsg]);
+    }
+  }, []);
+
+  // Create chat task on init
+  useEffect(() => {
+    (async () => {
+      if (!handlerId) {
+        const handler = await createTask("chat", {
+          index_name: defaultIndexName,
+          session_id: sessionIdRef.current,
+        });
+        setHandlerId(handler.handler_id);
+      }
+    })();
+  }, []);
+
+  // Subscribe to task/events using hook (auto stream when handler exists)
+  const { events } = useWorkflowTask(handlerId ?? "", Boolean(handlerId));
+
+  // Process streamed events into messages
+  useEffect(() => {
+    if (!events || events.length === 0) return;
+    let startIdx = lastProcessedEventIndexRef.current;
+    if (startIdx < 0) startIdx = 0;
+    if (startIdx >= events.length) return;
+
+    for (let i = startIdx; i < events.length; i++) {
+      const ev: any = events[i];
+      const type = ev?.type as string | undefined;
+      const rawData = ev?.data as any;
+      if (!type) continue;
rawData)) as any; + + if (type.includes("ChatDeltaEvent")) { + const delta: string = data?.delta ?? ""; + if (!delta) continue; + if (streamingMessageIndexRef.current === null) { + appendMessage("assistant", delta); + } else { + const idx = streamingMessageIndexRef.current; + const current = messages[idx!]?.content ?? ""; + if (current === "Thinking...") { + updateMessage(idx!, delta); + } else { + updateMessage(idx!, current + delta); + } + } + } else if (type.includes("ChatResponseEvent")) { + // finalize current stream + streamingMessageIndexRef.current = null; + } else if (type.includes("InputRequiredEvent")) { + // ready for next user input; enable send + setCanSend(true); + setIsLoading(false); + inputRef.current?.focus(); + } else if (type.includes("StopEvent")) { + // finished; no summary bubble needed (chat response already streamed) + } + } + lastProcessedEventIndexRef.current = events.length; + }, [events, messages]); + + const scrollToBottom = () => { + messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); + }; + + useEffect(() => { + scrollToBottom(); + }, [messages]); + + // No manual SSE cleanup needed + + const getCommonHeaders = () => ({ + ...(platformToken ? { authorization: `Bearer ${platformToken}` } : {}), + ...(projectId ? { "Project-Id": projectId } : {}), + }); + + const startChatIfNeeded = async (): Promise => { + if (handlerId) return handlerId; + const handler = await createTask("chat", { + index_name: defaultIndexName, + session_id: sessionIdRef.current, + }); + setHandlerId(handler.handler_id); + return handler.handler_id; + }; + + // Removed manual SSE ensureEventStream; hook handles streaming + + const handleSubmit = async (e: FormEvent) => { + e.preventDefault(); + + const trimmedInput = input.trim(); + if (!trimmedInput || isLoading || !canSend) return; + + // Add user message + const userMessage: Message = { + id: `user-${Date.now()}`, + role: "user", + content: trimmedInput, + timestamp: new Date() + }; + + const newMessages = [...messages, userMessage]; + setMessages(newMessages); + setInput(""); + setIsLoading(true); + setCanSend(false); + + // Immediately create an assistant placeholder to avoid visual gap before deltas + if (streamingMessageIndexRef.current === null) { + appendMessage("assistant", "Thinking..."); + } + + try { + // Ensure chat handler exists (created on init) + const hid = await startChatIfNeeded(); + + // Send user input as HumanResponseEvent + const postRes = await fetch(`/deployments/${deployment}/events/${hid}`, { + method: "POST", + headers: { + "Content-Type": "application/json", + ...getCommonHeaders(), + }, + body: JSON.stringify({ + event: JSON.stringify(toHumanResponseRawEvent(trimmedInput)) + }), + }); + if (!postRes.ok) { + throw new Error(`Failed to send message: ${postRes.status} ${postRes.statusText}`); + } + + // The assistant reply will be streamed by useWorkflowTask and appended incrementally + } catch (err) { + console.error("Chat error:", err); + + // Add error message + const errorMessage: Message = { + id: `error-${Date.now()}`, + role: "assistant", + content: `Sorry, I encountered an error: ${err instanceof Error ? err.message : "Unknown error"}. 
Please try again.`, + timestamp: new Date(), + error: true + }; + + setMessages(prev => [...prev, errorMessage]); + } finally { + setIsLoading(false); + // Focus back on input + inputRef.current?.focus(); + } + }; + + const handleKeyDown = (e: KeyboardEvent) => { + // Submit on Enter (without Shift) + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + handleSubmit(e as any); + } + }; + + const clearChat = () => { + setMessages([ + { + id: "welcome", + role: "assistant" as const, + content: welcomeMessage, + timestamp: new Date() + } + ]); + setInput(""); + inputRef.current?.focus(); + }; + + const retryLastMessage = () => { + const lastUserMessage = messages.filter(m => m.role === "user").pop(); + if (lastUserMessage) { + // Remove the last assistant message if it was an error + const lastMessage = messages[messages.length - 1]; + if (lastMessage.role === "assistant" && lastMessage.error) { + setMessages(prev => prev.slice(0, -1)); + } + setInput(lastUserMessage.content); + inputRef.current?.focus(); + } + }; + + return ( +
+ {/* Header */} +
+
+
+ +

{title}

+ {isLoading && ( + Thinking... + )} +
+
+ {messages.some(m => m.error) && ( + + )} + {messages.length > 0 && ( + + )} +
+
+
+ + {/* Messages */} + + {messages.length === 0 ? ( +
+
+ +

+ No messages yet +

+

+ Start a conversation! +

+
+
+ ) : ( +
+ {messages.map((message) => ( +
+ {message.role !== "user" && ( +
+ +
+ )} +
+ + +

+ {message.content} +

+

+ {message.timestamp.toLocaleTimeString()} +

+
+
+
+ {message.role === "user" && ( +
+ +
+ )} +
+ ))} + + {isLoading && ( +
+
+ +
+ + +
+
+ + + +
+
+
+
+
+ )} +
+
+ )} + + + {/* Input */} +
+
+ <Input + ref={inputRef} + value={input} + onChange={(e) => setInput(e.target.value)} + onKeyDown={handleKeyDown} + placeholder={placeholder} + disabled={isLoading} + className="flex-1" + autoFocus + /> + 
+

+ Press Enter to send • Shift+Enter for new line +

+
+
+ ); +} \ No newline at end of file diff --git a/test-proj/ui/src/index.css b/test-proj/ui/src/index.css new file mode 100644 index 0000000..d8e401f --- /dev/null +++ b/test-proj/ui/src/index.css @@ -0,0 +1,120 @@ +@import "tailwindcss"; +@import "tw-animate-css"; + +@custom-variant dark (&:is(.dark *)); + +@theme inline { + --radius-sm: calc(var(--radius) - 4px); + --radius-md: calc(var(--radius) - 2px); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) + 4px); + --color-background: var(--background); + --color-foreground: var(--foreground); + --color-card: var(--card); + --color-card-foreground: var(--card-foreground); + --color-popover: var(--popover); + --color-popover-foreground: var(--popover-foreground); + --color-primary: var(--primary); + --color-primary-foreground: var(--primary-foreground); + --color-secondary: var(--secondary); + --color-secondary-foreground: var(--secondary-foreground); + --color-muted: var(--muted); + --color-muted-foreground: var(--muted-foreground); + --color-accent: var(--accent); + --color-accent-foreground: var(--accent-foreground); + --color-destructive: var(--destructive); + --color-border: var(--border); + --color-input: var(--input); + --color-ring: var(--ring); + --color-chart-1: var(--chart-1); + --color-chart-2: var(--chart-2); + --color-chart-3: var(--chart-3); + --color-chart-4: var(--chart-4); + --color-chart-5: var(--chart-5); + --color-sidebar: var(--sidebar); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-ring: var(--sidebar-ring); +} + +:root { + --radius: 0.625rem; + --card: oklch(1 0 0); + --card-foreground: oklch(0.141 0.005 285.823); + --popover: oklch(1 0 0); + --popover-foreground: oklch(0.141 0.005 285.823); + --primary: oklch(0.21 0.006 285.885); + --primary-foreground: oklch(0.985 0 0); + --secondary: oklch(0.967 0.001 286.375); + --secondary-foreground: oklch(0.21 0.006 285.885); + --muted: oklch(0.967 0.001 286.375); + --muted-foreground: oklch(0.552 0.016 285.938); + --accent: oklch(0.967 0.001 286.375); + --accent-foreground: oklch(0.21 0.006 285.885); + --destructive: oklch(0.577 0.245 27.325); + --border: oklch(0.92 0.004 286.32); + --input: oklch(0.92 0.004 286.32); + --ring: oklch(0.705 0.015 286.067); + --chart-1: oklch(0.646 0.222 41.116); + --chart-2: oklch(0.6 0.118 184.704); + --chart-3: oklch(0.398 0.07 227.392); + --chart-4: oklch(0.828 0.189 84.429); + --chart-5: oklch(0.769 0.188 70.08); + --sidebar: oklch(0.985 0 0); + --sidebar-foreground: oklch(0.141 0.005 285.823); + --sidebar-primary: oklch(0.21 0.006 285.885); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.967 0.001 286.375); + --sidebar-accent-foreground: oklch(0.21 0.006 285.885); + --sidebar-border: oklch(0.92 0.004 286.32); + --sidebar-ring: oklch(0.705 0.015 286.067); + --background: oklch(1 0 0); + --foreground: oklch(0.141 0.005 285.823); +} + +.dark { + --background: oklch(0.141 0.005 285.823); + --foreground: oklch(0.985 0 0); + --card: oklch(0.21 0.006 285.885); + --card-foreground: oklch(0.985 0 0); + --popover: oklch(0.21 0.006 285.885); + --popover-foreground: oklch(0.985 0 0); + --primary: oklch(0.92 0.004 286.32); + --primary-foreground: oklch(0.21 0.006 285.885); + --secondary: 
oklch(0.274 0.006 286.033); + --secondary-foreground: oklch(0.985 0 0); + --muted: oklch(0.274 0.006 286.033); + --muted-foreground: oklch(0.705 0.015 286.067); + --accent: oklch(0.274 0.006 286.033); + --accent-foreground: oklch(0.985 0 0); + --destructive: oklch(0.704 0.191 22.216); + --border: oklch(1 0 0 / 10%); + --input: oklch(1 0 0 / 15%); + --ring: oklch(0.552 0.016 285.938); + --chart-1: oklch(0.488 0.243 264.376); + --chart-2: oklch(0.696 0.17 162.48); + --chart-3: oklch(0.769 0.188 70.08); + --chart-4: oklch(0.627 0.265 303.9); + --chart-5: oklch(0.645 0.246 16.439); + --sidebar: oklch(0.21 0.006 285.885); + --sidebar-foreground: oklch(0.985 0 0); + --sidebar-primary: oklch(0.488 0.243 264.376); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.274 0.006 286.033); + --sidebar-accent-foreground: oklch(0.985 0 0); + --sidebar-border: oklch(1 0 0 / 10%); + --sidebar-ring: oklch(0.552 0.016 285.938); +} + +@layer base { + * { + @apply border-border outline-ring/50; + } + body { + @apply bg-background text-foreground; + } +} diff --git a/test-proj/ui/src/libs/clients.ts b/test-proj/ui/src/libs/clients.ts new file mode 100644 index 0000000..192f57d --- /dev/null +++ b/test-proj/ui/src/libs/clients.ts @@ -0,0 +1,32 @@ +import { ApiClients, cloudApiClient, createWorkflowClient, createWorkflowConfig } from "@llamaindex/ui"; +import { AGENT_NAME } from "./config"; + +const platformToken = import.meta.env.VITE_LLAMA_CLOUD_API_KEY; +const apiBaseUrl = import.meta.env.VITE_LLAMA_CLOUD_BASE_URL; +const projectId = import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID; + +// Configure the platform client +cloudApiClient.setConfig({ + ...(apiBaseUrl && { baseUrl: apiBaseUrl }), + headers: { + // optionally use a backend API token scoped to a project. 
For local development, + ...(platformToken && { authorization: `Bearer ${platformToken}` }), + // This header is required for requests to correctly scope to the agent's project + // when authenticating with a user cookie + ...(projectId && { "Project-Id": projectId }), + }, +}); + +const workflowsClient = createWorkflowClient(createWorkflowConfig({ + baseUrl: `/deployments/${AGENT_NAME}/`, + headers: { + ...(platformToken && { authorization: `Bearer ${platformToken}` }), + } +})); + +const clients: ApiClients = { + workflowsClient: workflowsClient, + cloudApiClient: cloudApiClient, +}; + +export { clients }; diff --git a/test-proj/ui/src/libs/config.ts b/test-proj/ui/src/libs/config.ts new file mode 100644 index 0000000..8b821e9 --- /dev/null +++ b/test-proj/ui/src/libs/config.ts @@ -0,0 +1,2 @@ +export const APP_TITLE = "Test proj" +export const AGENT_NAME = import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME; diff --git a/test-proj/ui/src/libs/utils.ts b/test-proj/ui/src/libs/utils.ts new file mode 100644 index 0000000..183e8a2 --- /dev/null +++ b/test-proj/ui/src/libs/utils.ts @@ -0,0 +1,3 @@ +export function toHumanResponseRawEvent(str: string) { + return { __is_pydantic: true, value: { _data: { response: str } }, qualified_name: "workflows.events.HumanResponseEvent" } +} \ No newline at end of file diff --git a/test-proj/ui/src/main.tsx b/test-proj/ui/src/main.tsx new file mode 100644 index 0000000..fa6dbbf --- /dev/null +++ b/test-proj/ui/src/main.tsx @@ -0,0 +1,13 @@ +import React from "react"; +import ReactDOM from "react-dom/client"; +import App from "./App"; +import "@llamaindex/ui/styles.css"; +import "./index.css"; + +ReactDOM.createRoot(document.getElementById("root")!).render( + + + +); + + diff --git a/test-proj/ui/src/pages/Home.tsx b/test-proj/ui/src/pages/Home.tsx new file mode 100644 index 0000000..1a20aa2 --- /dev/null +++ b/test-proj/ui/src/pages/Home.tsx @@ -0,0 +1,46 @@ +import ChatBot from "../components/ChatBot"; +import { WorkflowTrigger } from "@llamaindex/ui"; +import { APP_TITLE } from "../libs/config"; + +export default function Home() { + return ( +
+
+ {/* Header */} +
+

+ {APP_TITLE} +

+

+ Upload documents and ask questions about them +

+
+ +
+
+ { + return { + file_id: files[0].fileId, + index_name: fieldValues.index_name, + }; + }} + /> +
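+ {/* The callback above shapes the trigger's start parameters: the
+ uploaded file's id plus the chosen index_name, which the upload
+ workflow consumes when it ingests the document. */}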
+
+
+ +
+
+
+
+
+ ); +} \ No newline at end of file diff --git a/test-proj/ui/src/vite-env.d.ts b/test-proj/ui/src/vite-env.d.ts new file mode 100644 index 0000000..77b8ee8 --- /dev/null +++ b/test-proj/ui/src/vite-env.d.ts @@ -0,0 +1,16 @@ +/// + +interface ImportMetaEnv { + readonly VITE_LLAMA_CLOUD_API_KEY?: string; + readonly VITE_LLAMA_CLOUD_BASE_URL?: string; + + // injected from llama_deploy + readonly VITE_LLAMA_DEPLOY_BASE_PATH: string; + readonly VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME: string; + readonly VITE_LLAMA_DEPLOY_PROJECT_ID: string; +} + +interface ImportMeta { + readonly env: ImportMetaEnv; +} + diff --git a/test-proj/ui/tsconfig.json b/test-proj/ui/tsconfig.json new file mode 100644 index 0000000..099ed9d --- /dev/null +++ b/test-proj/ui/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "target": "ES2020", + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "esModuleInterop": true, + "module": "ESNext", + "moduleResolution": "Bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + "types": ["vite/client"], + "baseUrl": ".", + "paths": { + "@/*": ["src/*"] + } + }, + "include": ["src"] +} diff --git a/test-proj/ui/vite.config.ts b/test-proj/ui/vite.config.ts new file mode 100644 index 0000000..966a1d5 --- /dev/null +++ b/test-proj/ui/vite.config.ts @@ -0,0 +1,49 @@ +import { defineConfig } from "vite"; +import react from "@vitejs/plugin-react"; +import path from "path"; +import dotenv from "dotenv"; + +dotenv.config({ path: '../.env' }); + +// https://vitejs.dev/config/ +export default defineConfig(({}) => { + const deploymentId = process.env.LLAMA_DEPLOY_DEPLOYMENT_URL_ID; + const basePath = process.env.LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH; + const projectId = process.env.LLAMA_DEPLOY_PROJECT_ID; + const port = process.env.PORT ? 
Number(process.env.PORT) : 3000; + const baseUrl = process.env.LLAMA_CLOUD_BASE_URL; + const apiKey = process.env.LLAMA_CLOUD_API_KEY; + + return { + plugins: [react()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + }, + }, + server: { + port: port, + host: true, + }, + build: { + outDir: "dist", + sourcemap: true, + }, + base: basePath, + define: { + "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME": + JSON.stringify(deploymentId), + "import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_BASE_PATH": JSON.stringify(basePath), + ...(projectId && { + "import.meta.env.VITE_LLAMA_DEPLOY_PROJECT_ID": + JSON.stringify(projectId), + }), + ...(baseUrl && { + "import.meta.env.VITE_LLAMA_CLOUD_BASE_URL": JSON.stringify(baseUrl), + }), + ...(apiKey && { + "import.meta.env.VITE_LLAMA_CLOUD_API_KEY": JSON.stringify(apiKey), + }), + }, + }; +}); From b4f56862bed79bacf97132e4c87d40aeaca02e8d Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:05:39 -0400 Subject: [PATCH 2/8] add github --- .github/workflows/check-regeneration.yml | 69 ++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 .github/workflows/check-regeneration.yml diff --git a/.github/workflows/check-regeneration.yml b/.github/workflows/check-regeneration.yml new file mode 100644 index 0000000..cb15cea --- /dev/null +++ b/.github/workflows/check-regeneration.yml @@ -0,0 +1,69 @@ +name: Check Template Regeneration + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + check-template: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + + - name: Install uv + uses: astral-sh/setup-uv@v3 + + - name: Run regeneration check + run: uv run copier/copy_utils.py check-regeneration + + check-python: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + + - name: Install uv + uses: astral-sh/setup-uv@v3 + + - name: Run Python checks + run: uv run hatch run all-check + working-directory: test-proj + + check-ui: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '24' + + - name: Enable Corepack + run: corepack enable + + - name: Activate pnpm version + working-directory: test-proj/ui + run: corepack prepare --activate + + + - name: Run UI checks + run: pnpm run all-check + working-directory: test-proj/ui \ No newline at end of file From 3e56a0cf1d59a8a759d6c37430eb0fba8a3bfe5a Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:08:38 -0400 Subject: [PATCH 3/8] Add py tests/lints --- pyproject.toml.jinja | 17 ++++++++++++++++- src/{{ project_name_snake }}/qa_workflows.py | 5 ++--- test-proj/pyproject.toml | 17 ++++++++++++++++- test-proj/src/test_proj/config.py | 2 +- test-proj/src/test_proj/qa_workflows.py | 5 ++--- 5 files changed, 37 insertions(+), 9 deletions(-) diff --git a/pyproject.toml.jinja b/pyproject.toml.jinja index 762dd75..08d2618 100644 --- a/pyproject.toml.jinja +++ b/pyproject.toml.jinja @@ -19,7 +19,22 @@ requires = ["hatchling"] build-backend = "hatchling.build" [dependency-groups] -dev = [] +dev = [ + "hatch>=1.14.1", + "pytest>=8.4.2", + "ruff>=0.13.0", + "ty>=0.0.1a20", +] + +[tool.hatch.envs.default.scripts] +"format" = "ruff format ." 
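+# CI invokes these via `uv run hatch run all-check`
+# (see .github/workflows/check-regeneration.yml); `all-fix` is the local
+# equivalent that rewrites files instead of only checking them.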
+"format-check" = "ruff format --check ." +"lint" = "ruff check --fix ." +"lint-check" = ["ruff check ."] +typecheck = "ty check src" +test = "pytest" +"all-check" = ["format-check", "lint-check", "test"] +"all-fix" = ["format", "lint", "test"] [tool.llamadeploy] env-files = [".env"] diff --git a/src/{{ project_name_snake }}/qa_workflows.py b/src/{{ project_name_snake }}/qa_workflows.py index cde304a..5363255 100644 --- a/src/{{ project_name_snake }}/qa_workflows.py +++ b/src/{{ project_name_snake }}/qa_workflows.py @@ -1,6 +1,5 @@ import logging import os -import uuid import httpx from llama_cloud.types import RetrievalMode @@ -23,7 +22,6 @@ from llama_index.core.memory import ChatMemoryBuffer from .clients import ( - INDEX_NAME, LLAMA_CLOUD_API_KEY, LLAMA_CLOUD_BASE_URL, get_custom_client, @@ -335,5 +333,6 @@ async def process_user_response( result={"success": False, "error": f"Error processing query: {str(e)}"} ) + upload = DocumentUploadWorkflow(timeout=None) -chat = ChatWorkflow(timeout=None) \ No newline at end of file +chat = ChatWorkflow(timeout=None) diff --git a/test-proj/pyproject.toml b/test-proj/pyproject.toml index 59ce3a5..7206b7e 100644 --- a/test-proj/pyproject.toml +++ b/test-proj/pyproject.toml @@ -19,7 +19,22 @@ requires = ["hatchling"] build-backend = "hatchling.build" [dependency-groups] -dev = [] +dev = [ + "hatch>=1.14.1", + "pytest>=8.4.2", + "ruff>=0.13.0", + "ty>=0.0.1a20", +] + +[tool.hatch.envs.default.scripts] +"format" = "ruff format ." +"format-check" = "ruff format --check ." +"lint" = "ruff check --fix ." +"lint-check" = ["ruff check ."] +typecheck = "ty check src" +test = "pytest" +"all-check" = ["format-check", "lint-check", "test"] +"all-fix" = ["format", "lint", "test"] [tool.llamadeploy] env-files = [".env"] diff --git a/test-proj/src/test_proj/config.py b/test-proj/src/test_proj/config.py index 0df6766..28c04d7 100644 --- a/test-proj/src/test_proj/config.py +++ b/test-proj/src/test_proj/config.py @@ -1,2 +1,2 @@ PROJECT_ID = "asdf" -ORGANIZATION_ID = "asdf" \ No newline at end of file +ORGANIZATION_ID = "asdf" diff --git a/test-proj/src/test_proj/qa_workflows.py b/test-proj/src/test_proj/qa_workflows.py index cde304a..5363255 100644 --- a/test-proj/src/test_proj/qa_workflows.py +++ b/test-proj/src/test_proj/qa_workflows.py @@ -1,6 +1,5 @@ import logging import os -import uuid import httpx from llama_cloud.types import RetrievalMode @@ -23,7 +22,6 @@ from llama_index.core.memory import ChatMemoryBuffer from .clients import ( - INDEX_NAME, LLAMA_CLOUD_API_KEY, LLAMA_CLOUD_BASE_URL, get_custom_client, @@ -335,5 +333,6 @@ async def process_user_response( result={"success": False, "error": f"Error processing query: {str(e)}"} ) + upload = DocumentUploadWorkflow(timeout=None) -chat = ChatWorkflow(timeout=None) \ No newline at end of file +chat = ChatWorkflow(timeout=None) From 0960afef9e791db01945f491485115399b98fadf Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:10:03 -0400 Subject: [PATCH 4/8] reformats --- test-proj/ui/package.json | 12 +- test-proj/ui/src/App.tsx | 4 +- test-proj/ui/src/components/ChatBot.tsx | 186 +++++++++++++++--------- test-proj/ui/src/libs/clients.ts | 21 ++- test-proj/ui/src/libs/config.ts | 2 +- test-proj/ui/src/libs/utils.ts | 8 +- test-proj/ui/src/main.tsx | 4 +- test-proj/ui/src/pages/Home.tsx | 16 +- test-proj/ui/src/vite-env.d.ts | 1 - 9 files changed, 163 insertions(+), 91 deletions(-) diff --git a/test-proj/ui/package.json b/test-proj/ui/package.json index 761feb4..cc20ce9 100644 --- 
a/test-proj/ui/package.json +++ b/test-proj/ui/package.json @@ -5,14 +5,19 @@ "type": "module", "scripts": { "dev": "vite", - "build": "tsc -b && vite build", - "preview": "vite preview" + "build": "tsc --noEmit && vite build", + "preview": "vite preview", + "lint": "tsc --noEmit", + "format": "prettier --write src", + "format-check": "prettier --check src", + "all-check": "pnpm i && pnpm run lint && pnpm run format-check && pnpm run build", + "all-fix": "pnpm i && pnpm run lint && pnpm run format && pnpm run build" }, "dependencies": { "@llamaindex/ui": "^1.0.2", "@llamaindex/workflows-client": "^1.2.0", - "llama-cloud-services": "^0.3.6", "@radix-ui/themes": "^3.2.1", + "llama-cloud-services": "^0.3.6", "lucide-react": "^0.544.0", "react": "^19.0.0", "react-dom": "^19.0.0", @@ -27,6 +32,7 @@ "@vitejs/plugin-react": "^4.3.4", "dotenv": "^17.2.2", "eslint": "^9", + "prettier": "^3.6.2", "tailwindcss": "^4", "typescript": "^5", "vite": "^5.4.8" diff --git a/test-proj/ui/src/App.tsx b/test-proj/ui/src/App.tsx index a52a680..6658701 100644 --- a/test-proj/ui/src/App.tsx +++ b/test-proj/ui/src/App.tsx @@ -10,5 +10,5 @@ export default function App() { - ) -} \ No newline at end of file + ); +} diff --git a/test-proj/ui/src/components/ChatBot.tsx b/test-proj/ui/src/components/ChatBot.tsx index f3af441..979506f 100644 --- a/test-proj/ui/src/components/ChatBot.tsx +++ b/test-proj/ui/src/components/ChatBot.tsx @@ -1,8 +1,25 @@ // This is a temporary chatbot component that is used to test the chatbot functionality. // LlamaIndex will replace it with better chatbot component. import { useState, useRef, useEffect, FormEvent, KeyboardEvent } from "react"; -import { Send, Loader2, Bot, User, MessageSquare, Trash2, RefreshCw } from "lucide-react"; -import { Button, Input, ScrollArea, Card, CardContent, cn, useWorkflowTaskCreate, useWorkflowTask } from "@llamaindex/ui"; +import { + Send, + Loader2, + Bot, + User, + MessageSquare, + Trash2, + RefreshCw, +} from "lucide-react"; +import { + Button, + Input, + ScrollArea, + Card, + CardContent, + cn, + useWorkflowTaskCreate, + useWorkflowTask, +} from "@llamaindex/ui"; import { AGENT_NAME } from "../libs/config"; import { toHumanResponseRawEvent } from "@/libs/utils"; @@ -28,19 +45,27 @@ export default function ChatBot() { // Deployment + auth setup const deployment = AGENT_NAME || "document-qa"; - const platformToken = (import.meta as any).env?.VITE_LLAMA_CLOUD_API_KEY as string | undefined; - const projectId = (import.meta as any).env?.VITE_LLAMA_DEPLOY_PROJECT_ID as string | undefined; - const defaultIndexName = (import.meta as any).env?.VITE_DEFAULT_INDEX_NAME || "document_qa_index"; - const sessionIdRef = useRef(`chat-${Math.random().toString(36).slice(2)}-${Date.now()}`); + const platformToken = (import.meta as any).env?.VITE_LLAMA_CLOUD_API_KEY as + | string + | undefined; + const projectId = (import.meta as any).env?.VITE_LLAMA_DEPLOY_PROJECT_ID as + | string + | undefined; + const defaultIndexName = + (import.meta as any).env?.VITE_DEFAULT_INDEX_NAME || "document_qa_index"; + const sessionIdRef = useRef( + `chat-${Math.random().toString(36).slice(2)}-${Date.now()}`, + ); // UI text defaults const title = "AI Document Assistant"; const placeholder = "Ask me anything about your documents..."; - const welcomeMessage = "Welcome! 👋 Upload a document with the control above, then ask questions here."; + const welcomeMessage = + "Welcome! 
👋 Upload a document with the control above, then ask questions here."; // Helper functions for message management const appendMessage = (role: Role, msg: string): void => { - setMessages(prev => { + setMessages((prev) => { const id = `${role}-stream-${Date.now()}`; const idx = prev.length; streamingMessageIndexRef.current = idx; @@ -57,7 +82,7 @@ export default function ChatBot() { }; const updateMessage = (index: number, message: string) => { - setMessages(prev => { + setMessages((prev) => { if (index < 0 || index >= prev.length) return prev; const copy = [...prev]; const existing = copy[index]; @@ -73,7 +98,7 @@ export default function ChatBot() { id: "welcome", role: "assistant", content: welcomeMessage, - timestamp: new Date() + timestamp: new Date(), }; setMessages([welcomeMsg]); } @@ -176,7 +201,7 @@ export default function ChatBot() { id: `user-${Date.now()}`, role: "user", content: trimmedInput, - timestamp: new Date() + timestamp: new Date(), }; const newMessages = [...messages, userMessage]; @@ -202,11 +227,13 @@ export default function ChatBot() { ...getCommonHeaders(), }, body: JSON.stringify({ - event: JSON.stringify(toHumanResponseRawEvent(trimmedInput)) + event: JSON.stringify(toHumanResponseRawEvent(trimmedInput)), }), }); if (!postRes.ok) { - throw new Error(`Failed to send message: ${postRes.status} ${postRes.statusText}`); + throw new Error( + `Failed to send message: ${postRes.status} ${postRes.statusText}`, + ); } // The assistant reply will be streamed by useWorkflowTask and appended incrementally @@ -219,10 +246,10 @@ export default function ChatBot() { role: "assistant", content: `Sorry, I encountered an error: ${err instanceof Error ? err.message : "Unknown error"}. Please try again.`, timestamp: new Date(), - error: true + error: true, }; - setMessages(prev => [...prev, errorMessage]); + setMessages((prev) => [...prev, errorMessage]); } finally { setIsLoading(false); // Focus back on input @@ -232,7 +259,7 @@ export default function ChatBot() { const handleKeyDown = (e: KeyboardEvent) => { // Submit on Enter (without Shift) - if (e.key === 'Enter' && !e.shiftKey) { + if (e.key === "Enter" && !e.shiftKey) { e.preventDefault(); handleSubmit(e as any); } @@ -244,20 +271,20 @@ export default function ChatBot() { id: "welcome", role: "assistant" as const, content: welcomeMessage, - timestamp: new Date() - } + timestamp: new Date(), + }, ]); setInput(""); inputRef.current?.focus(); }; const retryLastMessage = () => { - const lastUserMessage = messages.filter(m => m.role === "user").pop(); + const lastUserMessage = messages.filter((m) => m.role === "user").pop(); if (lastUserMessage) { // Remove the last assistant message if it was an error const lastMessage = messages[messages.length - 1]; if (lastMessage.role === "assistant" && lastMessage.error) { - setMessages(prev => prev.slice(0, -1)); + setMessages((prev) => prev.slice(0, -1)); } setInput(lastUserMessage.content); inputRef.current?.focus(); @@ -265,19 +292,27 @@ export default function ChatBot() { }; return ( -
+
{/* Header */}
-

{title}

+

+ {title} +

{isLoading && ( - Thinking... + + Thinking... + )}
- {messages.some(m => m.error) && ( + {messages.some((m) => m.error) && (
); -} \ No newline at end of file +} diff --git a/test-proj/ui/src/vite-env.d.ts b/test-proj/ui/src/vite-env.d.ts index 77b8ee8..25ad5bc 100644 --- a/test-proj/ui/src/vite-env.d.ts +++ b/test-proj/ui/src/vite-env.d.ts @@ -13,4 +13,3 @@ interface ImportMetaEnv { interface ImportMeta { readonly env: ImportMetaEnv; } - From 15771898280d79a10f3cdbb554c762a3a0083847 Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:13:18 -0400 Subject: [PATCH 5/8] hmm --- copier.yaml | 8 ++++---- copier/copy_utils.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/copier.yaml b/copier.yaml index e3d6d67..eedba2f 100644 --- a/copier.yaml +++ b/copier.yaml @@ -10,19 +10,19 @@ project_name: project_title: type: str - default: "{{ (project_name.replace('-', ' '))[:1] | upper ~ (project_name.replace('-', ' '))[1:] }}" - when: false + help: What is the title of your project? This will be used in the UI Title Bar. + default: "{{ project_name.replace('-', ' ').title() }}" # computed variables project_name_snake: type: str default: "{{ project_name.replace('-', '_') }}" when: false - + _exclude: - "test-proj" - ".git" - ".github" - "copier" - "CONTRIBUTING.md" - - "copier.yaml" \ No newline at end of file + - "copier.yaml" diff --git a/copier/copy_utils.py b/copier/copy_utils.py index 2c37c30..6555d29 100755 --- a/copier/copy_utils.py +++ b/copier/copy_utils.py @@ -44,6 +44,7 @@ def run_copier_quietly(src_path: str, dst_path: str, data: Dict[str, str]) -> No data=data, unsafe=True, quiet=True, + vcs_ref="HEAD", ) From 1b1cc97e2e5ae64d2903dff95373b8245d597c82 Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:14:30 -0400 Subject: [PATCH 6/8] fix formats --- ui/package.json.jinja | 12 ++- ui/src/App.tsx | 4 +- ui/src/components/ChatBot.tsx | 186 ++++++++++++++++++++++------------ ui/src/libs/clients.ts | 21 ++-- ui/src/libs/utils.ts | 8 +- ui/src/main.tsx | 4 +- ui/src/pages/Home.tsx | 16 +-- ui/src/vite-env.d.ts | 1 - 8 files changed, 162 insertions(+), 90 deletions(-) diff --git a/ui/package.json.jinja b/ui/package.json.jinja index e5d90d4..36904ec 100644 --- a/ui/package.json.jinja +++ b/ui/package.json.jinja @@ -5,14 +5,19 @@ "type": "module", "scripts": { "dev": "vite", - "build": "tsc -b && vite build", - "preview": "vite preview" + "build": "tsc --noEmit && vite build", + "preview": "vite preview", + "lint": "tsc --noEmit", + "format": "prettier --write src", + "format-check": "prettier --check src", + "all-check": "pnpm i && pnpm run lint && pnpm run format-check && pnpm run build", + "all-fix": "pnpm i && pnpm run lint && pnpm run format && pnpm run build" }, "dependencies": { "@llamaindex/ui": "^1.0.2", "@llamaindex/workflows-client": "^1.2.0", - "llama-cloud-services": "^0.3.6", "@radix-ui/themes": "^3.2.1", + "llama-cloud-services": "^0.3.6", "lucide-react": "^0.544.0", "react": "^19.0.0", "react-dom": "^19.0.0", @@ -27,6 +32,7 @@ "@vitejs/plugin-react": "^4.3.4", "dotenv": "^17.2.2", "eslint": "^9", + "prettier": "^3.6.2", "tailwindcss": "^4", "typescript": "^5", "vite": "^5.4.8" diff --git a/ui/src/App.tsx b/ui/src/App.tsx index a52a680..6658701 100644 --- a/ui/src/App.tsx +++ b/ui/src/App.tsx @@ -10,5 +10,5 @@ export default function App() { - ) -} \ No newline at end of file + ); +} diff --git a/ui/src/components/ChatBot.tsx b/ui/src/components/ChatBot.tsx index f3af441..979506f 100644 --- a/ui/src/components/ChatBot.tsx +++ b/ui/src/components/ChatBot.tsx @@ -1,8 +1,25 @@ // This is a temporary chatbot component that is used to test the chatbot 
functionality. // LlamaIndex will replace it with better chatbot component. import { useState, useRef, useEffect, FormEvent, KeyboardEvent } from "react"; -import { Send, Loader2, Bot, User, MessageSquare, Trash2, RefreshCw } from "lucide-react"; -import { Button, Input, ScrollArea, Card, CardContent, cn, useWorkflowTaskCreate, useWorkflowTask } from "@llamaindex/ui"; +import { + Send, + Loader2, + Bot, + User, + MessageSquare, + Trash2, + RefreshCw, +} from "lucide-react"; +import { + Button, + Input, + ScrollArea, + Card, + CardContent, + cn, + useWorkflowTaskCreate, + useWorkflowTask, +} from "@llamaindex/ui"; import { AGENT_NAME } from "../libs/config"; import { toHumanResponseRawEvent } from "@/libs/utils"; @@ -28,19 +45,27 @@ export default function ChatBot() { // Deployment + auth setup const deployment = AGENT_NAME || "document-qa"; - const platformToken = (import.meta as any).env?.VITE_LLAMA_CLOUD_API_KEY as string | undefined; - const projectId = (import.meta as any).env?.VITE_LLAMA_DEPLOY_PROJECT_ID as string | undefined; - const defaultIndexName = (import.meta as any).env?.VITE_DEFAULT_INDEX_NAME || "document_qa_index"; - const sessionIdRef = useRef(`chat-${Math.random().toString(36).slice(2)}-${Date.now()}`); + const platformToken = (import.meta as any).env?.VITE_LLAMA_CLOUD_API_KEY as + | string + | undefined; + const projectId = (import.meta as any).env?.VITE_LLAMA_DEPLOY_PROJECT_ID as + | string + | undefined; + const defaultIndexName = + (import.meta as any).env?.VITE_DEFAULT_INDEX_NAME || "document_qa_index"; + const sessionIdRef = useRef( + `chat-${Math.random().toString(36).slice(2)}-${Date.now()}`, + ); // UI text defaults const title = "AI Document Assistant"; const placeholder = "Ask me anything about your documents..."; - const welcomeMessage = "Welcome! 👋 Upload a document with the control above, then ask questions here."; + const welcomeMessage = + "Welcome! 
👋 Upload a document with the control above, then ask questions here."; // Helper functions for message management const appendMessage = (role: Role, msg: string): void => { - setMessages(prev => { + setMessages((prev) => { const id = `${role}-stream-${Date.now()}`; const idx = prev.length; streamingMessageIndexRef.current = idx; @@ -57,7 +82,7 @@ export default function ChatBot() { }; const updateMessage = (index: number, message: string) => { - setMessages(prev => { + setMessages((prev) => { if (index < 0 || index >= prev.length) return prev; const copy = [...prev]; const existing = copy[index]; @@ -73,7 +98,7 @@ export default function ChatBot() { id: "welcome", role: "assistant", content: welcomeMessage, - timestamp: new Date() + timestamp: new Date(), }; setMessages([welcomeMsg]); } @@ -176,7 +201,7 @@ export default function ChatBot() { id: `user-${Date.now()}`, role: "user", content: trimmedInput, - timestamp: new Date() + timestamp: new Date(), }; const newMessages = [...messages, userMessage]; @@ -202,11 +227,13 @@ export default function ChatBot() { ...getCommonHeaders(), }, body: JSON.stringify({ - event: JSON.stringify(toHumanResponseRawEvent(trimmedInput)) + event: JSON.stringify(toHumanResponseRawEvent(trimmedInput)), }), }); if (!postRes.ok) { - throw new Error(`Failed to send message: ${postRes.status} ${postRes.statusText}`); + throw new Error( + `Failed to send message: ${postRes.status} ${postRes.statusText}`, + ); } // The assistant reply will be streamed by useWorkflowTask and appended incrementally @@ -219,10 +246,10 @@ export default function ChatBot() { role: "assistant", content: `Sorry, I encountered an error: ${err instanceof Error ? err.message : "Unknown error"}. Please try again.`, timestamp: new Date(), - error: true + error: true, }; - setMessages(prev => [...prev, errorMessage]); + setMessages((prev) => [...prev, errorMessage]); } finally { setIsLoading(false); // Focus back on input @@ -232,7 +259,7 @@ export default function ChatBot() { const handleKeyDown = (e: KeyboardEvent) => { // Submit on Enter (without Shift) - if (e.key === 'Enter' && !e.shiftKey) { + if (e.key === "Enter" && !e.shiftKey) { e.preventDefault(); handleSubmit(e as any); } @@ -244,20 +271,20 @@ export default function ChatBot() { id: "welcome", role: "assistant" as const, content: welcomeMessage, - timestamp: new Date() - } + timestamp: new Date(), + }, ]); setInput(""); inputRef.current?.focus(); }; const retryLastMessage = () => { - const lastUserMessage = messages.filter(m => m.role === "user").pop(); + const lastUserMessage = messages.filter((m) => m.role === "user").pop(); if (lastUserMessage) { // Remove the last assistant message if it was an error const lastMessage = messages[messages.length - 1]; if (lastMessage.role === "assistant" && lastMessage.error) { - setMessages(prev => prev.slice(0, -1)); + setMessages((prev) => prev.slice(0, -1)); } setInput(lastUserMessage.content); inputRef.current?.focus(); @@ -265,19 +292,27 @@ export default function ChatBot() { }; return ( -
+
{/* Header */}
-

{title}

+

+ {title} +

{isLoading && ( - Thinking... + + Thinking... + )}
- {messages.some(m => m.error) && ( + {messages.some((m) => m.error) && (
); -} \ No newline at end of file +} diff --git a/ui/src/vite-env.d.ts b/ui/src/vite-env.d.ts index 77b8ee8..25ad5bc 100644 --- a/ui/src/vite-env.d.ts +++ b/ui/src/vite-env.d.ts @@ -13,4 +13,3 @@ interface ImportMetaEnv { interface ImportMeta { readonly env: ImportMetaEnv; } - From 0ec58bb53de173946e0f5e651be65e83f696eda7 Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:15:34 -0400 Subject: [PATCH 7/8] m --- src/{{ project_name_snake }}/config.py.jinja | 2 -- test-proj/.gitignore | 2 +- test-proj/src/test_proj/config.py | 2 -- test-proj/ui/src/libs/config.ts | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) delete mode 100644 src/{{ project_name_snake }}/config.py.jinja delete mode 100644 test-proj/src/test_proj/config.py diff --git a/src/{{ project_name_snake }}/config.py.jinja b/src/{{ project_name_snake }}/config.py.jinja deleted file mode 100644 index 1d7f57a..0000000 --- a/src/{{ project_name_snake }}/config.py.jinja +++ /dev/null @@ -1,2 +0,0 @@ -PROJECT_ID = "{{ llama_project_id }}" -ORGANIZATION_ID = "{{ llama_org_id }}" \ No newline at end of file diff --git a/test-proj/.gitignore b/test-proj/.gitignore index 5da78ab..06b8b0d 100644 --- a/test-proj/.gitignore +++ b/test-proj/.gitignore @@ -1,4 +1,4 @@ .env __pycache__ workflows.db -.venv \ No newline at end of file +.venv diff --git a/test-proj/src/test_proj/config.py b/test-proj/src/test_proj/config.py deleted file mode 100644 index 28c04d7..0000000 --- a/test-proj/src/test_proj/config.py +++ /dev/null @@ -1,2 +0,0 @@ -PROJECT_ID = "asdf" -ORGANIZATION_ID = "asdf" diff --git a/test-proj/ui/src/libs/config.ts b/test-proj/ui/src/libs/config.ts index 09c4d72..0b4adfe 100644 --- a/test-proj/ui/src/libs/config.ts +++ b/test-proj/ui/src/libs/config.ts @@ -1,2 +1,2 @@ -export const APP_TITLE = "Test proj"; +export const APP_TITLE = "Test Proj" export const AGENT_NAME = import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME; From 73d0352f4687ec0f9a1f011f65af02d5ba512fb2 Mon Sep 17 00:00:00 2001 From: Adrian Lyjak Date: Thu, 18 Sep 2025 14:16:31 -0400 Subject: [PATCH 8/8] m --- .env | 2 -- .env.template | 2 ++ test-proj/.env.template | 2 ++ test-proj/ui/src/libs/config.ts | 2 +- ui/src/libs/config.ts | 2 -- ui/src/libs/config.ts.jinja | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) delete mode 100644 .env create mode 100644 .env.template create mode 100644 test-proj/.env.template delete mode 100644 ui/src/libs/config.ts diff --git a/.env b/.env deleted file mode 100644 index 0da2864..0000000 --- a/.env +++ /dev/null @@ -1,2 +0,0 @@ -# OpenAI API configuration -OPENAI_API_KEY=sk-your-openai-api-key-here diff --git a/.env.template b/.env.template new file mode 100644 index 0000000..eb7fa0a --- /dev/null +++ b/.env.template @@ -0,0 +1,2 @@ +# Copy this to .env and set any necessary secrets +OPENAI_API_KEY=sk-your-openai-api-key-here diff --git a/test-proj/.env.template b/test-proj/.env.template new file mode 100644 index 0000000..eb7fa0a --- /dev/null +++ b/test-proj/.env.template @@ -0,0 +1,2 @@ +# Copy this to .env and set any necessary secrets +OPENAI_API_KEY=sk-your-openai-api-key-here diff --git a/test-proj/ui/src/libs/config.ts b/test-proj/ui/src/libs/config.ts index 0b4adfe..fec32d8 100644 --- a/test-proj/ui/src/libs/config.ts +++ b/test-proj/ui/src/libs/config.ts @@ -1,2 +1,2 @@ -export const APP_TITLE = "Test Proj" +export const APP_TITLE = "Test Proj"; export const AGENT_NAME = import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME; diff --git a/ui/src/libs/config.ts b/ui/src/libs/config.ts deleted file 
mode 100644 index 87ce41f..0000000 --- a/ui/src/libs/config.ts +++ /dev/null @@ -1,2 +0,0 @@ -export const APP_TITLE = "Test Project"; -export const AGENT_NAME = import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME; \ No newline at end of file diff --git a/ui/src/libs/config.ts.jinja b/ui/src/libs/config.ts.jinja index 8832fe5..acd8c84 100644 --- a/ui/src/libs/config.ts.jinja +++ b/ui/src/libs/config.ts.jinja @@ -1,2 +1,2 @@ -export const APP_TITLE = "{{ project_title }}" +export const APP_TITLE = "{{ project_title }}"; export const AGENT_NAME = import.meta.env.VITE_LLAMA_DEPLOY_DEPLOYMENT_NAME;
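
A note on the event wiring these patches converge on: the UI drives a deployed workflow purely over HTTP, wrapping each user turn in the pydantic-style envelope built by toHumanResponseRawEvent. Below is a minimal TypeScript sketch of that round trip, independent of React; the deployment name and handler id are illustrative placeholders, not values taken from this diff.

// Post one user turn to a running "chat" workflow handler, mirroring
// ChatBot.tsx. The envelope shape matches ui/src/libs/utils.ts.
function toHumanResponseRawEvent(str: string) {
  return {
    __is_pydantic: true,
    value: { _data: { response: str } },
    qualified_name: "workflows.events.HumanResponseEvent",
  };
}

async function sendTurn(deployment: string, handlerId: string, text: string) {
  const res = await fetch(`/deployments/${deployment}/events/${handlerId}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // The events endpoint expects the event itself as a JSON string,
    // hence the nested JSON.stringify, exactly as in ChatBot.tsx.
    body: JSON.stringify({ event: JSON.stringify(toHumanResponseRawEvent(text)) }),
  });
  if (!res.ok) throw new Error(`Failed to send message: ${res.status}`);
}

// Usage (the handler id would come from first creating a "chat" task):
// await sendTurn("test-proj", "<handler-id>", "What does the document say?");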